diff --git a/.cloudcannon/postbuild b/.cloudcannon/postbuild new file mode 100755 index 000000000..e336d37e0 --- /dev/null +++ b/.cloudcannon/postbuild @@ -0,0 +1,7 @@ +#!/bin/bash + +## Find and fix broken images and links caused by how the baseURL is set in the config.yml +find public -type f -name "*.html" -exec sed -i 's/="\/\//="\//g' {} \; + +## Add the CloudCannon 'editable' class to content divs +find public -type f -name "index.html" -exec sed -i 's/class="content/& editable/g' {} \; diff --git a/.cloudcannon/prebuild b/.cloudcannon/prebuild new file mode 100755 index 000000000..cf037d189 --- /dev/null +++ b/.cloudcannon/prebuild @@ -0,0 +1,29 @@ +#!/bin/bash + +## check the version of the hugo theme currently being used +## if it is different from the one defined in the go.mod file +## then run hugo mod get -u to update the theme + +theme_version=$(grep -o 'v[0-9]\+\(\.[0-9]\+\)\{2\}' go.mod) + +current_theme_version=$(hugo mod graph | grep -o 'v[0-9]\+\(\.[0-9]\+\)\{2\}') + +printf "Theme version in go.mod is: %s \n" "$theme_version" + +printf "Theme version in use by Hugo is: %s \n" "$current_theme_version" + + +# if the theme version in go.mod is the same as the one in use by Hugo, run hugo mod clean to clear any outdated theme files from the cache +# if the theme version in go.mod is different from the one in use by Hugo, update it by running hugo mod get -u + +if [ "$theme_version" == "$current_theme_version" ]; then + printf "Theme version in go.mod is the same as the one in use by Hugo. Cleaning the cache. 
\n" + hugo mod clean ; +elif [ "$theme_version" != "$current_theme_version" ]; then + printf "Updating theme version to %s \n" "$current_theme_version" + hugo mod get -u ; +# if an error occurs, exit with a non-zero status code +else + printf "An error occurred: unable to retrieve the latest version of the Hugo theme.\n" + exit 1 +fi diff --git a/.cloudcannon/preinstall b/.cloudcannon/preinstall new file mode 100644 index 000000000..ccd94ee41 --- /dev/null +++ b/.cloudcannon/preinstall @@ -0,0 +1 @@ +install-hugo 0.134.2 diff --git a/.cloudcannon/schemas/concept.md b/.cloudcannon/schemas/concept.md new file mode 100644 index 000000000..58412247a --- /dev/null +++ b/.cloudcannon/schemas/concept.md @@ -0,0 +1,31 @@ +--- +title: +# Remove or change to false to turn off the right-hand in-page ToC +toc: true +# Add a short description (150 chars or less) for the doc. Include keywords for SEO. +# The description text appears in search results and at the top of the doc. +description: "" +# Assign weights in increments of 100 +# Lower numbers appear higher in the document list +weight: +tags: [ "docs" ] +--- + +## Overview + +Briefly describe the goal of this document, that is, what the user will learn or accomplish by reading what follows. + +## Concept 1 - format as a noun phrase + +This is where you explain the concept. Provide information that will help the user understand what the element/feature is and how it fits into the overall product. + +Organize content in this section with H3 and H4 headings. + +## Concept 2 - format as a noun phrase + +## Concept 3 - format as a noun phrase + +## What's Next + +- Provide up to 5 links to related topics (optional). +- Format as a bulleted list. 
diff --git a/.cloudcannon/schemas/default.md b/.cloudcannon/schemas/default.md new file mode 100644 index 000000000..e3a9186f0 --- /dev/null +++ b/.cloudcannon/schemas/default.md @@ -0,0 +1,65 @@ +--- +title: +# Remove or set to false to turn off the right-hand in-page ToC +toc: true +# Add a short description (150 chars or less) for the doc. Include keywords for SEO. +# The description text appears in search results and at the top of the doc. +description: "" +# Assign weights in increments of 100 +# Lower numbers appear higher in the document list +weight: +tags: [ "docs" ] +--- + +## Overview + +Briefly describe the goal of this document, that is, what the user will learn or accomplish by reading what follows. + +Introduce and explain any new concepts the user may need to understand before proceeding. + +## Before You Begin + +To complete the instructions in this guide, you need the following: + +1. Provide any prerequisites here. +2. Format as a numbered or bulleted list as appropriate. +3. Keep the list entries grammatically parallel. + +## Goal 1 - write as a verb phrase + +Add introductory text. Say what the user will be doing. + +To do xyz, take the following steps: + +1. This is where you provide the steps that the user must take to accomplish the goal. + + ```bash + code examples should be nested within the list + ``` + +2. Format as numbered lists. + + {{< note >}}Add notes like this.{{}} + +3. If there is only one step, you don't need to format it as a numbered list. + +## Goal 2 - write as a verb phrase + +## Goal 3 - write as a verb phrase + +## Discussion + +Use the discussion section to expand on the information presented in the steps above. + +This section contains the "why" information. + +This information lives at the end of the document so that users who just want to follow the steps don't have to scroll through a wall of explanatory text to find them. 
+ +## Verification + +Explain how the user can verify the steps completed successfully. + +## What's Next + +- Provide up to 5 links to related topics (optional). +- Format as a bulleted list. diff --git a/.cloudcannon/schemas/headless-collection.md b/.cloudcannon/schemas/headless-collection.md new file mode 100644 index 000000000..3d65eaa0f --- /dev/null +++ b/.cloudcannon/schemas/headless-collection.md @@ -0,0 +1,3 @@ +--- +headless: true +--- \ No newline at end of file diff --git a/.cloudcannon/schemas/includes.md b/.cloudcannon/schemas/includes.md new file mode 100644 index 000000000..8b447a2cc --- /dev/null +++ b/.cloudcannon/schemas/includes.md @@ -0,0 +1,3 @@ +--- +docs: "" +--- \ No newline at end of file diff --git a/.cloudcannon/schemas/nms/policy.md b/.cloudcannon/schemas/nms/policy.md new file mode 100644 index 000000000..074bf86b4 --- /dev/null +++ b/.cloudcannon/schemas/nms/policy.md @@ -0,0 +1,176 @@ +--- +title: +# Remove or set to false to turn off the right-hand in-page ToC +toc: true +# Add a short description (150 chars or less) for the doc. Include keywords for SEO. +# The description text appears in search results and at the top of the doc. +description: "" +# Assign weights in increments of 100 +# Lower numbers appear higher in the document list +weight: +tags: [ "docs" ] +--- + +## Overview + + + +--- + +## About XYZ Policy + + + +#### Intended Audience + + + +--- + +## Workflow for Applying Policy + + + +--- + +## Policy Settings + + + +The following table lists the configurable settings and their default values for the policy. + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Datatype | Possible Values | Description | Required | Default | +|--------------|----------|---------------------|----------------------------------------------------|----------|-----------------------| +| `users.id` | integer | A unique int >= 1 | Description for value. 
| Yes | System assigned | +| `users.name` | string | Example: `Jane Doe` | A short description of what the field is used for. | Yes | Add the default value | +| `user.age` | integer | 1–110 | Description for the value | Yes | | + +{{< /bootstrap-table >}} + + +--- + +## Adding XYZ Policy + +{{}} + +{{%tab name="API"%}} + +{{}}{{< include "acm/how-to/access-acm-api.md" >}}{{}} + +To create an XYZ policy using the REST API, send an HTTP `POST` request to the Add-Endpoint-Name-Here endpoint. + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Method | Endpoint | +|--------|---------------------| +| `POST` | `/path/to/endpoint` | + +{{}} + + +
+JSON request + +``` json +{ + "users": [ + { + "id": 1, + "name": "John Doe", + "age": 24 + }, + { + "id": 2, + "name": "Jane Doe", + "age": 28 + } + ] +} +``` + +
+ +{{%/tab%}} + +{{%tab name="UI"%}} + +To create an XYZ policy using the web interface: + +1. Go to the FQDN for your NGINX Management Suite host in a web browser and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. +2. Add other steps here +3. As a numbered list. + +{{%/tab%}} + +{{
}} + +--- + +## Verify the Policy + + + +Confirm that the policy has been set up and configured correctly by taking these steps: + +- Add steps to verify policy was applied successfully to the management plane. + +Confirm the policy is being enforced: + +- Add steps to verify policy is being enforced on the data plane. What can users expect to see? + +--- + +## Troubleshooting + + + +For help resolving common issues when setting up and configuring the policy, follow the steps in this section. If you cannot find a solution to your specific issue, reach out to [NGINX Customer Support]({{< relref "support/contact-support.md" >}}) for assistance. + +### Issue 1 + +Add a description for the issue. Include any error messages users might see. + +Resolution/Workaround: + +1. Add steps here +2. As a +3. numbered list. + +### Issue 2 + +Add a description for the issue. Include any error messages users might see. + +Resolution/Workaround: + +1. Add steps here +2. As a +3. numbered list. diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 000000000..4950eb3f9 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,4 @@ +# Set the default behavior for correcting line endings, +# in case people don't have core.autocrlf set. +* text=auto + diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 4fdc9cd20..1d3d925c4 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -24,8 +24,8 @@ A clear and concise description of what you expected to happen. 
### Your environment -- Version of this project or specific commit - +- Version/release of this project or specific commit + - Target deployment platform ### Additional context diff --git a/.github/about.txt b/.github/about.txt new file mode 100644 index 000000000..d556164ce --- /dev/null +++ b/.github/about.txt @@ -0,0 +1,16 @@ +# GitHub Configurations + +This directory contains settings used by GitHub for Actions, identifying code reviewers, issue and pull request templates, and more. + +## Workflows + +### check-broken-links + +The check-broken-links workflow relies on the following projects: + +- https://github.com/BoundfoxStudios/action-hugo-link-check +- https://github.com/fenneclab/hugo-bin/releases + +The hugo-bin project lets you add Hugo as a dependency in your npm package.json file. The action-hugo-link-check Action requires this, as that is how it identifies which version of Hugo to run to build your docs. + +> Note: The release version numbers in the hugo-bin project do not align with the actual Hugo releases. Check the README and versions in the hugo-bin repo to find out which releases correspond to Hugo releases. diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml new file mode 100644 index 000000000..235213cb4 --- /dev/null +++ b/.github/workflows/build-push.yml @@ -0,0 +1,118 @@ +name: Build and deploy (docs) +on: + workflow_call: + inputs: + environment: + description: "Deployment environment. 
Must be one of preview, dev, staging, or prod" + required: true + default: preview + type: string + secrets: + AZURE_CREDENTIALS_DOCS: + required: true + AZURE_KEY_VAULT_DOCS: + required: true + workflow_dispatch: + inputs: + environment: + description: "Environment to deploy to" + required: true + default: "preview" + type: choice + options: + - preview + - dev + - staging + - prod + hugo_theme_override: + description: "Override hugo theme (leave blank to use latest version)" + required: false + default: "" + type: string + pull_request: + branches: + - "*" + push: + branches: + - "nim-release-2.18.0" + +env: + FRONT_DOOR_USERNAME: ${{ secrets.FRONT_DOOR_USERNAME }} + FRONT_DOOR_PASSWORD: ${{ secrets.FRONT_DOOR_PASSWORD }} + GITHUB_PR_NUMBER: ${{ github.event.pull_request.number }} +jobs: + prod-check-branch: + runs-on: ubuntu-24.04 + steps: + - name: Output variables + run: | + echo "Environment: ${{ inputs.environment }}" + echo "Branch: ${{ github.ref }}" + - name: Checks to see that main branch is selected if deploying to prod + if: ${{ inputs.environment == 'prod' && github.ref != 'refs/heads/main' }} + run: | + echo "Deployment to 'prod' can only be done from the 'main' branch." 
+ exit 1 + + call-docs-build-push: + needs: prod-check-branch + uses: nginxinc/docs-actions/.github/workflows/docs-build-push.yml@9c59fab05a8131f4d691ba6ea2b6a119f3ef832a # v1.0.7 + with: + production_url_path: "" + preview_url_path: "/previews/docs" + docs_source_path: "public" + docs_build_path: "./" + doc_type: "hugo" + environment: ${{inputs.environment}} + force_hugo_theme_version: ${{inputs.hugo_theme_override}} + auto_deploy_branch: "nim-release-2.18.0" + auto_deploy_env: "staging" + secrets: + AZURE_CREDENTIALS: ${{secrets.AZURE_CREDENTIALS_DOCS}} + AZURE_KEY_VAULT: ${{secrets.AZURE_KEY_VAULT_DOCS}} + + lighthouseci: + if: github.event.pull_request + needs: call-docs-build-push + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ github.event.workflow_run.head_branch }} + - uses: actions/setup-node@v4 + with: + node-version: 18 + - name: Installing packages + run: npm install + - name: Generating lighthouse reports for PR and main... + run: | + node lighthouse-script.js + - name: Compare the artifacts for negative differences in performance + continue-on-error: true + run: | + FIELDS=("performance" "accessibility") + for FIELD in "${FIELDS[@]}"; do + PR_VALUE=$(cat lighthouse-reports/pr-report.json | jq -r ".categories.$FIELD.score") + MAIN_VALUE=$(cat lighthouse-reports/main-report.json | jq -r ".categories.$FIELD.score") + echo "$FIELD: PR - $PR_VALUE | Main - $MAIN_VALUE" + + if [ $FIELD = "performance" ]; then + LOWER_BOUND=$(echo "$MAIN_VALUE - 0.05" | bc) + UPPER_BOUND=$(echo "$MAIN_VALUE + 0.05" | bc) + if (( $(echo "$PR_VALUE < $LOWER_BOUND" | bc -l) || $(echo "$PR_VALUE > $UPPER_BOUND" | bc -l) )); then + echo "Error: $FIELD score in PR ($PR_VALUE) is less than in MAIN ($MAIN_VALUE)" + exit 1 + fi + else + if (( $(echo "$PR_VALUE < $MAIN_VALUE" | bc -l) )); then + echo "Error: $FIELD score in PR ($PR_VALUE) is less than in MAIN ($MAIN_VALUE)" + exit 1 + fi + fi + done + - uses: actions/upload-artifact@v4 + if: ${{ 
!cancelled() }} + with: + name: lighthouse-reports + path: lighthouse-reports/ + retention-days: 30 \ No newline at end of file diff --git a/.github/workflows/check-broken-links.yml b/.github/workflows/check-broken-links.yml new file mode 100644 index 000000000..05d7baf30 --- /dev/null +++ b/.github/workflows/check-broken-links.yml @@ -0,0 +1,25 @@ +name: Check for Broken Links +# Generates the Hugo production site and checks for broken links +on: + workflow_dispatch: + # Run on pull requests with changed md files + # pull_request: + # paths: + # - 'content/**.md' + +jobs: + check-broken-links: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - uses: BoundfoxStudios/action-hugo-link-check@v2.0.3 + with: + hugo-root: ./ + hugo-content-dir: ./content + hugo-config: ./config/production/config.yaml + skip: github\.com\/nginxinc\/docs\/(tree|edit|new|issues\/new),twitter\.com\/nginx,linkedin\.com,facebook\.com,.*\/nginx-ingress-controller\/,.*\/nginxaas\/azure\/,.*\/nginx-amplify\/,.*\/nginx-agent\/,www\.nginx\.com,nginx\.com,www\.mywebsite\.com,example\.com + hugo-startup-wait-time: 60 + + diff --git a/.github/workflows/coveo.yml b/.github/workflows/coveo.yml new file mode 100644 index 000000000..8fa990fa1 --- /dev/null +++ b/.github/workflows/coveo.yml @@ -0,0 +1,112 @@ +name: Refresh coveo searchToken for docs dev/staging/production +on: + workflow_dispatch: + schedule: + - cron: "0 */23 * * *" + +jobs: + generate-coveo-search-token: + name: Generate Coveo Search Tokens + runs-on: ubuntu-latest + strategy: + matrix: + include: + - env_name: dev + env_api_key: "COVEO_API_DEV" + env_coveo_org_id: "f5networkx1h1607h" + - env_name: staging + env_api_key: "COVEO_API_STAGING" + env_coveo_org_id: "f5networksnonproduction1xqykzabw" + - env_name: prod + env_api_key: "COVEO_API_PROD" + env_coveo_org_id: "f5networksproduction5vkhn00h" + steps: + - name: Install jq + run: sudo apt-get install jq + + - name: Generating token for 
${{matrix.env_name}} ... + env: + COVEO_API_KEY: ${{secrets[matrix.env_api_key]}} + COVEO_SEARCH_HUB: "HUB_ES_Nginx_Docs_And_Org" + run: | + RESPONSE=$(curl -w "\nHTTP_CODE: %{http_code}" -s -X POST "https://${{matrix.env_coveo_org_id}}.org.coveo.com/rest/search/v2/token" \ + -H "Accept: application/json" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer ${COVEO_API_KEY}" \ + -d '{ + "searchHub": "'${COVEO_SEARCH_HUB}'", + "organization": "'${{matrix.env_coveo_org_id}}'", + "userIds": [ + { + "type": "User", + "name": "anonymous", + "provider": "Email Security Provider" + } + ] + }') + STATUS_CODE=$(echo "$RESPONSE" | grep HTTP_ | awk '{print $2}') + SEARCH_TOKEN=$(echo "$RESPONSE" | sed '$d' | jq -r '.token') + + if [ $STATUS_CODE -ne 200 ]; then + echo "Error: HTTP request failed with status $STATUS_CODE" + exit 1 + fi + if [ "$SEARCH_TOKEN" == "null" ]; then + echo "Error: Failed to extract search token from response" + exit 1 + fi + + mkdir coveo/ + echo "{\"token\": \"$SEARCH_TOKEN\", \"org_id\": \"${{matrix.env_coveo_org_id}}\"}" > coveo/search_token.json + + - name: Upload token for ${{matrix.env_name}} + uses: actions/upload-artifact@v4 + with: + name: ${{matrix.env_name}} + path: "./" + + push-tokens-to-azure: + name: Push coveo search tokens to Azure + runs-on: ubuntu-latest + needs: generate-coveo-search-token + steps: + - name: Download Coveo search token + uses: actions/download-artifact@v4 + + - name: View files + run: ls -R + + - name: Login to Azure + uses: azure/login@v2 + with: + creds: ${{secrets.AZURE_CREDENTIALS_DOCS}} + + - name: Retrieve secrets from Keyvault + id: keyvault + uses: azure/cli@089eac9d8cc39f5d003e94f8b65efc51076c9cbd + with: + inlineScript: | + secrets_get=(productionHostname previewHostname resourceGroupName cdnProfileName cdnName accountName) + for secret_get in ${secrets_get[@]} + do + value=$(az keyvault secret show --name $secret_get --vault-name ${{ secrets.AZURE_KEY_VAULT_DOCS }} --query value 
--output tsv) + echo "::add-mask::$value" + echo "$secret_get=$value" >> $GITHUB_OUTPUT + done + + - name: Push to Azure container storage + run: | + az storage blob upload-batch \ + -s ./ \ + -d '$web' \ + --account-name ${{steps.keyvault.outputs.accountName}} \ + --overwrite \ + --content-cache-control "no-store" \ + --auth-mode login + + az afd endpoint purge \ + --resource-group ${{steps.keyvault.outputs.resourceGroupName}} \ + --profile-name ${{steps.keyvault.outputs.cdnProfileName}} \ + --endpoint-name ${{steps.keyvault.outputs.cdnName}} \ + --domains docs.nginx.com docs-dev.nginx.com docs-staging.nginx.com \ + --content-paths '/.netlify/functions/*' diff --git a/.github/workflows/daily-deploy.yml b/.github/workflows/daily-deploy.yml new file mode 100644 index 000000000..5d595605e --- /dev/null +++ b/.github/workflows/daily-deploy.yml @@ -0,0 +1,16 @@ +name: Daily Deploy Docs CRON + +on: + workflow_dispatch: + schedule: + # * is a special character in YAML so you have to quote this string + - cron: '0 17 * * *' + - cron: '0 1 * * *' +jobs: + trigger: + uses: nginxinc/docs/.github/workflows/build-push.yml@main + with: + environment: "prod" + secrets: + AZURE_CREDENTIALS_DOCS: ${{secrets.AZURE_CREDENTIALS_DOCS}} + AZURE_KEY_VAULT_DOCS: ${{secrets.AZURE_KEY_VAULT_DOCS}} \ No newline at end of file diff --git a/.github/workflows/fossa.yml b/.github/workflows/fossa.yml new file mode 100644 index 000000000..3b5b58f6c --- /dev/null +++ b/.github/workflows/fossa.yml @@ -0,0 +1,30 @@ +name: Fossa + +on: + workflow_dispatch: + inputs: + branch: + type: string + required: false + default: main + +concurrency: + group: ${{ github.ref_name }}-fossa + cancel-in-progress: true + +permissions: + contents: read + +jobs: + scan: + name: Fossa + runs-on: ubuntu-24.04 + if: ${{ github.event.repository.fork == false }} + steps: + - name: Checkout Repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Scan + uses: 
fossas/fossa-action@09bcf127dc0ccb4b5a023f6f906728878e8610ba # v1.4.0 + with: + api-key: ${{ secrets.FOSSA_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/linkchecker-bad.yml b/.github/workflows/linkchecker-bad.yml new file mode 100644 index 000000000..9946fb96c --- /dev/null +++ b/.github/workflows/linkchecker-bad.yml @@ -0,0 +1,42 @@ +# This is a TEMPORARY Action that is testing the docs with existing broken URLs +# Once issues have been resolved and the project docs are passing the jobs will be moved into QE LinkChecker + +name: QE LinkChecker Failing + +# This workflow runs on a schedule or can be run manually +on: + workflow_dispatch: + schedule: + # Runs every day at 8am UTC + - cron: '0 8 * * *' + +jobs: + nginx-plus: + runs-on: ubuntu-latest + steps: + # Install LinkChecker + - name: Install LinkChecker + run: sudo apt-get update && sudo apt-get install -y linkchecker + # Run LinkChecker on nginx-plus docs + - name: Run LinkChecker nginx-plus + run: linkchecker https://docs.nginx.com/nginx/ --no-warnings --check-extern --ignore-url ^https://consent.trustarc.com --ignore-url ^http://backend1.example.com --ignore-url ^http://www.example.com --ignore-url ^http://example.com --ignore-url ^https://my-nginx.example.com --ignore-url ^https://www.nginxroute53.com --ignore-url ^http://cafe --ignore-url ^http://192.168.1.23 --ignore-url ^https://company.com --ignore-url ^https://my-nginx-plus.example.com --ignore-url ^https://cognito-idp --ignore-url ^https:///www.okta.com + + nginx-oss: + runs-on: ubuntu-latest + steps: + # Install LinkChecker + - name: Install LinkChecker + run: sudo apt-get update && sudo apt-get install -y linkchecker + # Run LinkChecker on nginx-oss docs + - name: Run LinkChecker nginx-oss + run: linkchecker https://nginx.org/en/docs/ --no-warnings --check-extern + + nginx-agent: + runs-on: ubuntu-latest + steps: + # Install LinkChecker + - name: Install LinkChecker + run: sudo apt-get update && sudo apt-get install -y 
linkchecker + # Run LinkChecker on nginx-agent docs + - name: Run LinkChecker nginx-agent + run: linkchecker https://docs.nginx.com/nginx-agent/ --no-warnings --check-extern --ignore-url ^https://consent.trustarc.com --ignore-url ^http://localhost diff --git a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml new file mode 100644 index 000000000..143052fb8 --- /dev/null +++ b/.github/workflows/linkchecker.yml @@ -0,0 +1,111 @@ +# Using https://github.com/linkchecker/linkchecker to test NGINX Documentation +# Recursively checks all internal and external links in the docs. +# Warnings for redirects are disabled +# To ignore a url use the add to the env.ignore list. eg --ignore-url ^http://www.example.com +# To add or remove a project edit the doc_paths list + +name: QE LinkChecker + +on: + workflow_dispatch: + inputs: + basepath: + description: "Documentation site to test (default: https://docs.nginx.com)" + required: true + default: "https://docs.nginx.com" + type: choice + options: + - "https://docs.nginx.com" + - "https://docs-dev.nginx.com" + - "https://docs-staging.nginx.com" + schedule: + # Runs every day at 7am UTC + - cron: '0 7 * * *' + +env: + defaults: '--no-warnings --check-extern --ignore-url ^https://consent.trustarc.com' + ignore: > + --ignore-url ^http://127.0.0.1 --ignore-url ^http://localhost --ignore-url ^http://serv/ --ignore-url ^https://myserver.host.com + --ignore-url ^https://product.apis.f5.com --ignore-url ^https://product-s.apis.f5.com --ignore-url ^https://linux.die.net/man/ + --ignore-url ^http://www.example.com --ignore-url ^file:// --ignore-url ^https://www.fastbot.de --ignore-url ^https://www.domain.com + --ignore-url ^https://lightstep.com --ignore-url ^https://www.owasp.org/ --ignore-url ^https://www.maxmind.com --ignore-url ^https://www.splunk.com/ + --ignore-url ^https://oauth2.googleapis.com --ignore-url ^https://openidconnect.googleapis.com --ignore-url ^https://www.base64url.com/ + --ignore-url 
^https://go.googlesource.com/ --ignore-url ^https://go.googlesource.com/sync --ignore-url ^https://linkerd.io/2.13/ + --ignore-url ^http://www.redirectpage.com/ + --ignore-url ^https://\([a-zA-Z0-9-]+\).nginx.com/nginx-ingress-controller/css + --ignore-url ^https://\([a-zA-Z0-9-]+\).nginx.com/nginxaas/azure/css + --ignore-url ^https://\([a-zA-Z0-9-]+\).nginx.com/nginx-gateway-fabric/css + +jobs: + link-checker: + runs-on: ubuntu-latest + strategy: + matrix: + doc_paths: + - nginx-one + - nginx-instance-manager + - solutions + - nginx-app-protect-dos + - nginx-app-protect-waf + - nginx-ingress-controller + - nginxaas/azure + - nginx-service-mesh + - nginx-amplify + - nginx-controller + - nginx-waf + - nginx-management-suite + - nginx-gateway-fabric + steps: + # Determine and set basepath for schedule or workflow_dispatch + - name: Set Basepath + run: | + if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then + echo "basepath=${{ github.event.inputs.basepath }}" >> $GITHUB_ENV + echo "isProduction=${{ github.event.inputs.basepath == 'https://docs.nginx.com'}}" >> $GITHUB_ENV + else + echo "basepath=https://docs.nginx.com" >> $GITHUB_ENV + echo "isProduction=true" >> $GITHUB_ENV + fi + + - name: Login to Azure + if: env.isProduction != 'true' + uses: azure/login@a65d910e8af852a8061c627c456678983e180302 # v2.2.0 + with: + creds: ${{secrets.AZURE_CREDENTIALS_DOCS}} + + - name: Retrieve secrets from Keyvault + if: env.isProduction != 'true' + id: keyvault + uses: azure/cli@089eac9d8cc39f5d003e94f8b65efc51076c9cbd + with: + inlineScript: | + secrets_get=(frontdoorUsername frontdoorPassword) + for secret_get in ${secrets_get[@]} + do + value=$(az keyvault secret show --name $secret_get --vault-name ${{ secrets.AZURE_KEY_VAULT_DOCS }} --query value --output tsv) + echo "::add-mask::$value" + echo "$secret_get=$value" >> $GITHUB_OUTPUT + done + + # Setup linkchecker command with optional password + - name: Setup LinkChecker Command + run: | + if [ 
"${{env.isProduction}}" != "true" ]; then + echo "cmd=echo '${{steps.keyvault.outputs.frontdoorPassword}}' | linkchecker -u '${{steps.keyvault.outputs.frontdoorUsername}}' ${{ env.basepath }}/${{ matrix.doc_paths }}/ ${{ env.defaults }} ${{ env.ignore }}" >> $GITHUB_ENV + else + echo "cmd=linkchecker ${{ env.basepath }}/${{ matrix.doc_paths }}/ ${{ env.defaults }} ${{ env.ignore }}" >> $GITHUB_ENV + fi + + # Install LinkChecker + - name: Install LinkChecker + run: | + sudo apt-get update && sudo apt-get install -y linkchecker + + # Run LinkChecker + - name: Run LinkChecker on ${{ matrix.doc_paths }} + continue-on-error: ${{ env.isProduction != 'true' }} + uses: nick-fields/retry@7152eba30c6575329ac0576536151aca5a72780e # v3.0.0 + with: + timeout_minutes: 10 + max_attempts: 3 + command: ${{ env.cmd }} diff --git a/.github/workflows/mend.yml b/.github/workflows/mend.yml new file mode 100644 index 000000000..961722146 --- /dev/null +++ b/.github/workflows/mend.yml @@ -0,0 +1,38 @@ +name: Mend + +on: + workflow_dispatch: + inputs: + branch: + type: string + required: false + default: main + +concurrency: + group: ${{ github.ref_name }}-mend + cancel-in-progress: true + +permissions: + contents: read + +jobs: + scan: + name: Mend + runs-on: ubuntu-24.04 + steps: + - name: Checkout Repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + ref: ${{ inputs.branch && inputs.branch || github.ref }} + + - name: Download agent + run: curl -fsSLJO https://github.com/whitesource/unified-agent-distribution/releases/latest/download/wss-unified-agent.jar + + - name: Verify JAR + run: jarsigner -verify wss-unified-agent.jar + + - name: Scan and upload + env: + PRODUCT_NAME: nginx_documentation_${{ inputs.branch && inputs.branch || github.ref_name }} + PROJECT_NAME: nic + run: java -jar wss-unified-agent.jar -noConfig true -wss.url ${{ secrets.WSS_URL }} -apiKey ${{ secrets.WS_APIKEY_NGINX }} -product ${{ secrets.WS_PRODUCT }} -project ${{ 
secrets.WS_PROJECT }} -d . \ No newline at end of file diff --git a/.github/workflows/notification.yml b/.github/workflows/notification.yml new file mode 100644 index 000000000..2874ecfd4 --- /dev/null +++ b/.github/workflows/notification.yml @@ -0,0 +1,78 @@ +name: Notifications for Tests +on: + workflow_run: + branches: [main] + workflows: + - "UI validation on prod" + - "QE LinkChecker" + - "Check for Broken Links" + types: [completed] + +permissions: + contents: read + +jobs: + on-failure: + runs-on: ubuntu-latest + if: ${{ github.event.workflow_run.conclusion == 'failure' }} + permissions: + contents: read + actions: read # for 8398a7/action-slack + checks: read + steps: + - name: Retrieve Job Data + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + continue-on-error: true + id: data + with: + script: | + const message = context.payload.workflow_run.head_commit.message + message_sanitized = message.split('\n')[0] + + const check_data = (await github.rest.checks.listForRef({ + owner: context.payload.repository.owner.login, + repo: context.payload.repository.name, + ref: context.payload.workflow_run.head_commit.id, + })).data.check_runs.filter(check_run => check_run.conclusion === 'failure')[0] + + return { + job_name: check_data.name, + job_url: check_data.html_url, + commit_message: message_sanitized, + } + + - name: Send notification + uses: 8398a7/action-slack@28ba43ae48961b90635b50953d216767a6bea486 # v3.16.2 + with: + status: custom + custom_payload: | + { + username: 'Github', + mention: 'channel', + attachments: [{ + title: '[${{ github.event.repository.full_name }}] ${{ github.event.workflow.name }} pipeline has failed (${{ github.event.workflow_run.event }})', + color: 'danger', + fields: [{ + title: 'Commit', + value: ``, + short: false + }, + { + title: 'Failed Job', + value: `<${{ fromJSON(steps.data.outputs.result).job_url }}|${{ fromJSON(steps.data.outputs.result).job_name }}>`, + short: true + }, + { + title: 
'Author', + value: `${{ github.event.workflow_run.head_commit.author.name }}`, + short: true + }, + { + title: 'Pipeline URL', + value: ``, + short: true + }] + }] + } + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} \ No newline at end of file diff --git a/.github/workflows/playwright.yml b/.github/workflows/playwright.yml new file mode 100644 index 000000000..8f0343ed2 --- /dev/null +++ b/.github/workflows/playwright.yml @@ -0,0 +1,32 @@ +name: UI validation on prod +on: + workflow_dispatch: + schedule: + - cron: "0 * * * *" + +jobs: + run-playwright-tests: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: lts/* + - name: Install dependencies + run: npm ci + - name: Install Playwright Browsers + run: npx playwright install --with-deps + - name: Run Playwright tests + run: npx playwright test --retries=2 + - uses: actions/upload-artifact@v4 + if: ${{ !cancelled() }} + with: + name: playwright-report + path: tests/playwright-report/ + retention-days: 30 + - uses: actions/upload-artifact@v4 + if: ${{ !cancelled() }} + with: + name: test-results + path: tests/test-results/ + retention-days: 30 diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 000000000..ff90e0eae --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,34 @@ +# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time. +# +# You can adjust the behavior by modifying this file. 
+# For more information, see: +# https://github.com/actions/stale +name: 'Close stale issues and PRs' +on: + schedule: + - cron: '30 1 * * *' # run every day at 01:30 UTC + +permissions: # added using https://github.com/step-security/secure-workflows + contents: read + +jobs: + stale: + permissions: + issues: write # for actions/stale to close stale issues + pull-requests: write # for actions/stale to close stale PRs + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v9.0.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: 'This issue is stale because it has been open for 90 days with no activity. Remove the stale label or add a comment to keep it open. If you do not take action, this will be closed in 10 days.' + stale-pr-message: 'This PR is stale because it has been open 90 days with no activity. Remove the stale label or add a comment to keep it open. If you do not take action, this will be closed in 10 days.' + close-issue-message: 'This issue was closed after 100 days of inactivity.' + close-pr-message: 'This PR was closed after 100 days of inactivity.' 
+ stale-issue-label: 'stale' + stale-pr-label: 'stale' + exempt-all-assignees: true + exempt-issue-labels: 'enhancement' + operations-per-run: 100 + days-before-stale: 90 + days-before-close: 10 diff --git a/.redocly.yaml b/.redocly.yaml new file mode 100644 index 000000000..1422a108a --- /dev/null +++ b/.redocly.yaml @@ -0,0 +1,19 @@ +apiDefinitions: + ctlrAdc: content/controller/api/reference/ctlr-adc-openapi.json + ctlrApim: content/controller/api/reference/ctlr-apim-openapi.json + ctlrAnalytics: content/controller/api/reference/ctlr-analytics-openapi.json + ctlrPlatform: content/controller/api/reference/ctlr-platform-openapi.json +lint: + extends: + - recommended + rules: + tag-description: off + operation-summary: error + no-unresolved-refs: error + no-unused-components: error + operation-2xx-response: error + operation-operationId: error + operation-singular-tag: error + no-enum-type-mismatch: error + no-identical-paths: error + no-ambiguous-paths: error \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 8276954e6..b65b17fe4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,5 @@ # Changelog - - -## 1.0.0 (Month Date, Year) +## 1.0.0 (January 13, 2025) Initial open source release of the documentation repository for enterprise NGINX products. This is a filtered mirror of an internal repository. diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index e18d3706b..7f68c0bc1 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -74,5 +74,3 @@ This Code of Conduct is adapted from the [Contributor Covenant](https://www.cont Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/inclusion). - -For answers to common questions about this code of conduct, see the FAQ at . Translations are available at . diff --git a/README.md b/README.md index 04c774bef..228ac7965 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ once you've submitted the PR. 
## License -[Creative Commons License](/LICENSE) +[BSD 2-Clause "Simplified" License](/LICENSE) © [F5, Inc.](https://www.f5.com/) 2024 diff --git a/SUPPORT.md b/SUPPORT.md index 8d67c82d1..a465c3ef9 100644 --- a/SUPPORT.md +++ b/SUPPORT.md @@ -6,7 +6,7 @@ We use GitHub for tracking bugs and feature requests related to this project. Don't know how something in this project works? Curious if this project can achieve your desired functionality? Please open an issue on GitHub with the label `question`. -Alternatively, open a [discussion](https://github.com/nginxinc/oss-docs/discussions) in this repository. +Alternatively, open a [discussion](https://github.com/nginx/documentation/discussions) in this repository. ## NGINX Specific Questions and/or Issues @@ -14,7 +14,7 @@ This isn't the right place to get support for NGINX specific questions, but the ### Mailing List -Want to get in touch with the NGINX development team directly? Try using the relevant mailing list found at ! +Want to get in touch with the NGINX development team directly? Try using the relevant mailing list found at , ### Documentation diff --git a/assets/ctlr/catalogs/dimensions-catalog.json b/assets/ctlr/catalogs/dimensions-catalog.json new file mode 100644 index 000000000..872f42b25 --- /dev/null +++ b/assets/ctlr/catalogs/dimensions-catalog.json @@ -0,0 +1,193 @@ +[ + { + "description": "The length of time for which monitor sample data is aggregated.", + "name": "aggregation_duration", + "type": "string" + }, + { + "description": "Alias of the upstream server.", + "name": "alias", + "type": "string" + }, + { + "description": "The name of an application. 
Application names are unique within the environment namespace where the app resides.", + "name": "app", + "type": "string" + }, + { + "description": "The address of BIG-IP device", + "name": "big_ip_address", + "type": "string" + }, + { + "description": "The name of a cache zone.", + "name": "cache_zone", + "type": "string" + }, + { + "description": "The name of an application component.", + "name": "component", + "type": "string" + }, + { + "description": "The ISO 3166-1 representation of the country from which a request was sent.", + "name": "country_code", + "type": "string" + }, + { + "description": "Environment of the application.", + "name": "environment", + "type": "string" + }, + { + "description": "Logical group to which the highest layer protocol processed by the proxy belongs. Allowed values are 'web' and 'tcp-udp'", + "name": "family", + "type": "string" + }, + { + "description": "Path to a desired file.", + "name": "file_path", + "type": "string" + }, + { + "description": "The name of gateway. The gateway can be shared across applications.", + "name": "gateway", + "type": "string" + }, + { + "description": "Hostname as provided in the 'Host' header field from client's request.", + "name": "http.hostname", + "type": "string" + }, + { + "description": "The HTTP method of the request. For example, 'GET', 'POST', 'PUT'.", + "name": "http.request_method", + "type": "string" + }, + { + "description": "The 3-digit HTTP response status code. 
For example, '200', '404', '302'.", + "name": "http.response_code", + "type": "string" + }, + { + "description": "The IP address and port, or the path to the UNIX-domain socket, of the upstream server.", + "name": "http.upstream_addr", + "type": "string" + }, + { + "description": "The URI (path) section of the URL in a request.", + "name": "http.uri", + "type": "string" + }, + { + "description": "A version schema.", + "name": "http.version_schema", + "type": "string" + }, + { + "description": "The name of the NGINX Plus instance where the upstream server is running.", + "name": "instance", + "type": "string" + }, + { + "description": "The tags associated with the NGINX Plus instance where the upstream server is running.", + "name": "instance.tags", + "type": "string" + }, + { + "description": "The name of the NGINX instance group to manage set of instances that can be used for scaling and high availability.", + "name": "instance_group", + "type": "string" + }, + { + "description": "local_id dimension.", + "name": "local_id", + "type": "string" + }, + { + "description": "A location associated with one or more NGINX Plus instances.", + "name": "location", + "type": "string" + }, + { + "description": "The user-defined filter provided in a metrics query.", + "name": "log_filter", + "type": "string" + }, + { + "description": "A filesystem mount point.", + "name": "mount_point", + "type": "string" + }, + { + "description": "A server network interface.", + "name": "network_interface", + "type": "string" + }, + { + "description": "The hostname of the NGINX Plus instance.", + "name": "parent_hostname", + "type": "string" + }, + { + "description": "Highest layer protocol that the proxy processes. 
Allowed values are: 'http', 'tcp', and 'udp'", + "name": "proxied_protocol", + "type": "string" + }, + { + "description": "A Published API in the API Management module.", + "name": "published_api", + "type": "string" + }, + { + "description": "Indicates whether the traffic was allowed to go through or was blocked by the firewall.", + "name": "request_outcome", + "type": "string" + }, + { + "description": "The reason why the firewall blocked the traffic.", + "name": "request_outcome_reason", + "type": "string" + }, + { + "description": "The unique identifier of the operating system where the Controller Agent is installed.", + "name": "root_uuid", + "type": "string" + }, + { + "description": "Address of the upstream server", + "name": "upstream_addr", + "type": "string" + }, + { + "description": "The 3-digit HTTP response status code sent by the upstream server. For example, '200', '404', '302'.", + "name": "upstream_response_code", + "type": "string" + }, + { + "description": "A list of comma separated names of suspected attacks identified in a transaction.", + "name": "waf.attack_types", + "type": "string" + }, + { + "description": "Value of the matching signatures that resulted in the security violation.", + "name": "waf.signature_ids", + "type": "string" + }, + { + "description": "The likelihood that the security violation is indeed a threat.", + "name": "waf.violation_rating", + "type": "string" + }, + { + "description": "The additional granularity of security violation that causes the traffic was blocked or flagged by the firewall.", + "name": "waf.violation_subviolations", + "type": "string" + }, + { + "description": "The security violation that causes the traffic was blocked or flagged by the firewall.", + "name": "waf.violations", + "type": "string" + } + ] + \ No newline at end of file diff --git a/assets/ctlr/catalogs/metrics-catalog.json b/assets/ctlr/catalogs/metrics-catalog.json new file mode 100644 index 000000000..d3899e520 --- /dev/null +++ 
b/assets/ctlr/catalogs/metrics-catalog.json @@ -0,0 +1,13075 @@ +[ + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "bigip" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of bits received on client-side", + "dimensions": [ + "environment" + ], + "name": "bigip.stream.bits_in", + "rate": "", + "rollup_aggregate": "SUM", + "source": "bigip_stats", + "type": "incremental", + "unit": "bits", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "bigip" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of bits sent on client-side", + "dimensions": [ + "environment" + ], + "name": "bigip.stream.bits_out", + "rate": "", + "rollup_aggregate": "SUM", + "source": "bigip_stats", + "type": "incremental", + "unit": "bits", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "bigip" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Minimal number of connection on client-side", + "dimensions": [ + "environment" + ], + "name": "bigip.stream.conn", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "bigip_stats", + "type": "gauge", + "unit": "integer", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "bigip" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + 
"description": "Current number of connections on client-side", + "dimensions": [ + "environment" + ], + "name": "bigip.stream.conn.current", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "bigip_stats", + "type": "gauge", + "unit": "integer", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "bigip" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Maximum number of connections on client-side", + "dimensions": [ + "environment" + ], + "name": "bigip.stream.conn.max", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "bigip_stats", + "type": "gauge", + "unit": "integer", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "bigip" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Total number of connections on client-side", + "dimensions": [ + "environment" + ], + "name": "bigip.stream.conn.total", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "bigip_stats", + "type": "gauge", + "unit": "integer", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "bigip" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Health indicator of client-side. It takes two possible values. 
0 - unhealthy, 1 - healthy", + "dimensions": [ + "environment" + ], + "name": "bigip.stream.health.healthy", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "bigip_stats", + "type": "gauge", + "unit": "integer", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "bigip" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of packets received on client-side", + "dimensions": [ + "environment" + ], + "name": "bigip.stream.packets_in", + "rate": "", + "rollup_aggregate": "SUM", + "source": "bigip_stats", + "type": "incremental", + "unit": "packtes", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "bigip" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of packets sent on client-side", + "dimensions": [ + "environment" + ], + "name": "bigip.stream.packets_out", + "rate": "", + "rollup_aggregate": "SUM", + "source": "bigip_stats", + "type": "incremental", + "unit": "packets", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "bigip" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of bits received on upstream server", + "dimensions": [ + "environment", + "instance_group", + "instance", + "big_ip_address" + ], + "name": "bigip.upstream.bits_in", + "rate": "", + "rollup_aggregate": "SUM", + "source": "bigip_stats", + "type": "incremental", + "unit": "bits", + "visible": false + }, + { + "agent_type": 
"gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "bigip" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of bits sent on upstream server", + "dimensions": [ + "environment", + "instance_group", + "instance", + "big_ip_address" + ], + "name": "bigip.upstream.bits_out", + "rate": "", + "rollup_aggregate": "SUM", + "source": "bigip_stats", + "type": "incremental", + "unit": "bits", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "bigip" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Current number of connection on upstream server", + "dimensions": [ + "environment", + "instance_group", + "instance", + "big_ip_address" + ], + "name": "bigip.upstream.conn.current", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "bigip_stats", + "type": "gauge", + "unit": "integer", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "bigip" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Maximum number of connections on upstream server", + "dimensions": [ + "environment", + "instance_group", + "instance", + "big_ip_address" + ], + "name": "bigip.upstream.conn.max", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "bigip_stats", + "type": "gauge", + "unit": "integer", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "bigip" + ], + 
"compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Minimal number of connection on upstream server", + "dimensions": [ + "environment", + "instance_group", + "instance", + "big_ip_address" + ], + "name": "bigip.upstream.conn.min", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "bigip_stats", + "type": "gauge", + "unit": "integer", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "bigip" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Total number of connections on upstream server", + "dimensions": [ + "environment", + "instance_group", + "instance", + "big_ip_address" + ], + "name": "bigip.upstream.conn.total", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "bigip_stats", + "type": "gauge", + "unit": "integer", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "bigip" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Health indicator of upstream server. It takes two possible values. 
0 - unhealthy, 1 - healthy", + "dimensions": [ + "environment", + "instance_group", + "instance", + "big_ip_address" + ], + "name": "bigip.upstream.health.healthy", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "bigip_stats", + "type": "gauge", + "unit": "integer", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "bigip" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of packets received on upstream server", + "dimensions": [ + "environment", + "instance_group", + "instance", + "big_ip_address" + ], + "name": "bigip.upstream.packets_in", + "rate": "", + "rollup_aggregate": "SUM", + "source": "bigip_stats", + "type": "incremental", + "unit": "packets", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "bigip" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of packets sent on upstream server", + "dimensions": [ + "environment", + "instance_group", + "instance", + "big_ip_address" + ], + "name": "bigip.upstream.packets_out", + "rate": "", + "rollup_aggregate": "SUM", + "source": "bigip_stats", + "type": "incremental", + "unit": "packets", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Bytes received from clients to be proxied to upstream servers/workload group members.", + "dimensions": [ + "local_id", + "http.response_code", + 
"request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "bytes_rcvd", + "rate": "", + "rollup_aggregate": "SUM", + "source": "avrd", + "type": "incremental", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Bytes sent from upstream servers/workload group members to be proxied to clients.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "bytes_sent", + "rate": "", + "rollup_aggregate": "SUM", + "source": "avrd", + "type": "incremental", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + 
"MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Average value for total client latencies.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.latency.avg", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "The number of requests for which the total client latency was calculated.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + 
"waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.latency.count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Maximal value among total latencies on the client-side.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.latency.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Minimal value among total latencies on the client-side.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + 
"component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol" + ], + "name": "client.latency.min", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Total client latency value calculated as a sum of network and response latencies on the client-side.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.latency.total", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": 
"incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Hit count for client.network.latency calculation.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.network.latency.count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Maximal network latency on the client-side derived from TCP connection handshake.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + 
"waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.network.latency.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Minimal network latency on the client-side derived from TCP connection handshake.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.network.latency.min", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Total network latency on the client-side derived from TCP connection handshake.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + 
"environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.network.latency.total", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Hit count for client.request.latency calculation.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.request.latency.count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": 
"gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Maximal request latency on the client-side derived from HTTP(S) requests.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.request.latency.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Minimal request latency on the client-side derived from HTTP(S) requests.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + 
"waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.request.latency.min", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Total request latency on the client-side derived from HTTP(S) requests.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.request.latency.total", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Hit count for client.response.latency calculation.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + 
"environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.response.latency.count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Maximal response latency on the client-side derived from HTTP(S) responses.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.response.latency.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + 
"backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Minimal response latency on the client-side derived from HTTP(S) responses.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.response.latency.min", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Total response latency on the client-side derived from HTTP(S) responses.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + 
"waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.response.latency.total", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Hit count for client.ttfb.latency calculation.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.ttfb.latency.count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Maximal time measured on the client-side from sending the first byte of the request till receiving the first byte of the response.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", 
+ "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol" + ], + "name": "client.ttfb.latency.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Minimal time measured on the client-side from sending the first byte of the request till receiving the first byte of the response.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.ttfb.latency.min", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": 
"gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Total time measured on the client-side from sending the first byte of the request till receiving the first byte of the response.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "client.ttfb.latency.total", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Count of connections from clients that are proxied to upstream servers/workload group members.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + 
"parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "connection_count", + "rate": "", + "rollup_aggregate": "SUM", + "source": "avrd", + "type": "incremental", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Hit count for connection_duration calculation.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "connection_duration.count", + "rate": "", + "rollup_aggregate": "SUM", + "source": "avrd", + "type": "incremental", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Maximal duration of connections from clients that 
are proxied to upstream servers/workload group members.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "connection_duration.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Minimal duration of connections from clients that are proxied to upstream servers/workload group members.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "connection_duration.min", + "rate": "", + 
"rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Total duration of connections from clients that are proxied to upstream servers/workload group members.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "connection_duration.total", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "container" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Count of containers for \"container\" type objects.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "controller.agent.container.count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "self", + "type": "gauge", + 
"unit": "integer", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "percent", + "description": "CPU utilization (kernel) percentage observed from the agent process.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "controller.agent.cpu.system", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "percent", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "percent", + "description": "CPU utilization (total) percentage observed from the agent process.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "controller.agent.cpu.total", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "percent", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "percent", + "description": "CPU utilization (user) percentage observed from the agent process.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + 
"published_api", + "instance_group" + ], + "name": "controller.agent.cpu.user", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "percent", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "Memory (RAM) utilized by the agent process.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "controller.agent.mem.rss", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "Memory (total) utilized by the agent process.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "controller.agent.mem.vms", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Number of agent heartbeats (sent every minute).", + "dimensions": [ + "local_id", + 
"parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "controller.agent.status", + "rate": "", + "rollup_aggregate": "AVG", + "source": "self", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Bytes incoming to the upstream through the proxy.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "http.request.bytes_rcvd", + "rate": "", + "rollup_aggregate": "SUM", + "source": "avrd", + "type": "incremental", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Bytes outgoing from the upstream through the proxy.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + 
"component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "http.request.bytes_sent", + "rate": "", + "rollup_aggregate": "SUM", + "source": "avrd", + "type": "incremental", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "HTTP(S) requests coming to upstream through proxy.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "http.request.count", + "rate": "", + "rollup_aggregate": "SUM", + "source": "avrd", + "type": "incremental", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + 
"nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "The response was fetched from the origin server instead of served from the cache because the request matched a proxy_cache_bypass directive. The response might then have been cached.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.cache.bypass", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "The entry in the cache has expired. 
The response contains fresh content from the origin server.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.cache.expired", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "The response contains valid, fresh content direct from the cache.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.cache.hit", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "The response was not found in the cache and so was fetched from an origin server. 
The response might then have been cached.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "log_filter", + "instance_group" + ], + "name": "nginx.cache.miss", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "The proxy_cache_revalidate directive was enabled and NGINX verified that the current cached content was still valid.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.cache.revalidated", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "The content is stale because the origin server is not responding correctly, and proxy_cache_use_stale was configured.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.cache.stale", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_custom", + "type": 
"gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "The content is stale because the entry is currently being updated in response to a previous request, and proxy_cache_use_stale updating is configured.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.cache.updating", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "RATE", + "default_unit": "integer", + "description": "NGINX-wide statistics describing HTTP and TCP connections.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.conn.accepted", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "stub_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + 
"counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "NGINX-wide statistics describing HTTP and TCP connections.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.conn.active", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "stub_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "NGINX-wide statistics describing HTTP and TCP connections.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.conn.current", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "stub_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "RATE", + "default_unit": "integer", + "description": "NGINX-wide statistics describing HTTP and TCP connections.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.conn.dropped", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "stub_status", + 
"type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "NGINX-wide statistics describing HTTP and TCP connections.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.conn.idle", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "stub_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "average", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "float", + "description": "Achieved compression ratio, calculated as the ratio between the original and compressed response sizes.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "log_filter", + "instance_group" + ], + "name": "nginx.http.gzip.ratio", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "float", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + 
"description": "Statistics about observed request methods.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "log_filter", + "instance_group" + ], + "name": "nginx.http.method.delete", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Statistics about observed request methods.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.method.get", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Statistics about observed request methods.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.method.head", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + 
"MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Statistics about observed request methods.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "log_filter", + "instance_group" + ], + "name": "nginx.http.method.options", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Statistics about observed request methods.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.method.other", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Statistics about observed request methods.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", 
+ "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.method.post", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Statistics about observed request methods.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.method.put", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "RATE", + "default_unit": "net_io", + "description": "Number of bytes sent to clients, not counting response headers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.request.body_bytes_sent", + "rate": "net_io_rate", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + 
"apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests that were buffered to disk.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.request.buffered", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "error_log", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "RATE", + "default_unit": "net_io", + "description": "Number of bytes sent to clients.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.request.bytes_sent", + "rate": "net_io_rate", + "rollup_aggregate": "DROP", + "source": "access_custom", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "RATE", + "default_unit": "integer", + "description": "Total number of client requests.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.request.count", + "rate": 
"double", + "rollup_aggregate": "DROP", + "source": "stub_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Number of currently active requests (reading and writing).", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.request.current", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "stub_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "average", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "Request length, including request line, header, and body.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.request.length", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + 
"default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of malformed requests.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.request.malformed", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Number of requests reading headers from clients.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.request.reading", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "stub_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Request processing time — time elapsed between reading the first bytes from the client and writing a log entry after the last bytes were sent.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.request.time", + "rate": "", + "rollup_aggregate": "AVG", + 
"source": "access_custom", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Request processing time — time elapsed between reading the first bytes from the client and writing a log entry after the last bytes were sent.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.request.time.count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Request processing time — time elapsed between reading the first bytes from the client and writing a log entry after the last bytes were sent.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.request.time.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + 
"apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Request processing time — time elapsed between reading the first bytes from the client and writing a log entry after the last bytes were sent.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.request.time.median", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Request processing time — time elapsed between reading the first bytes from the client and writing a log entry after the last bytes were sent.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.request.time.pctl95", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Number of requests writing responses to clients.", + "dimensions": [ + 
"local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.request.writing", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "stub_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests with HTTP status codes 1xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "log_filter", + "instance_group" + ], + "name": "nginx.http.status.1xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests with HTTP status codes 2xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.status.2xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": 
"incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests with HTTP status codes 3xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.status.3xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests with specific HTTP status code.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.status.403", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests with specific HTTP status code.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + 
"instance_group" + ], + "name": "nginx.http.status.404", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests with HTTP status codes 4xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "log_filter", + "instance_group" + ], + "name": "nginx.http.status.4xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests with specific HTTP status code.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.status.500", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + 
"compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests with specific HTTP status code.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.status.502", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests with specific HTTP status code.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.status.503", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests with specific HTTP status code.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.status.504", + "rate": "double", + "rollup_aggregate": "DROP", + "source": 
"access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests with HTTP status codes 5xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.status.5xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests finalized with status code 499 which is logged when the client closes the connection.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.status.discarded", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + 
"default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests using a specific version of the HTTP protocol.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "log_filter", + "instance_group" + ], + "name": "nginx.http.v0_9", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests using a specific version of the HTTP protocol.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.v1_0", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests using a specific version of the HTTP protocol.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.http.v1_1", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + 
"unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests using a specific version of the HTTP protocol.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "log_filter", + "instance_group" + ], + "name": "nginx.http.v2", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_combined", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Count of reloads of nginx master process.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.master.reloads", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "stub_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Status of nginx: 1 - nginx is up, 0 - nginx is down.", + 
"dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.status", + "rate": "", + "rollup_aggregate": "AVG", + "source": "self", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Time spent on establishing connections with upstream servers. With SSL, it also includes time spent on the handshake.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.connect.time", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Time spent on establishing connections with upstream servers. 
With SSL, it also includes time spent on the handshake.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.connect.time.count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Time spent on establishing connections with upstream servers. With SSL, it also includes time spent on the handshake.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.connect.time.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Time spent on establishing connections with upstream servers. 
With SSL, it also includes time spent on the handshake.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.connect.time.median", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Time spent on establishing connections with upstream servers. With SSL, it also includes time spent on the handshake.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.connect.time.pctl95", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Time spent on receiving response headers from upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.header.time", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": 
"seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Time spent on receiving response headers from upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.header.time.count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Time spent on receiving response headers from upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.header.time.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Time spent on receiving 
response headers from upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "log_filter", + "instance_group" + ], + "name": "nginx.upstream.header.time.median", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Time spent on receiving response headers from upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.header.time.pctl95", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests that were sent to upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.next.count", + "rate": "", + "rollup_aggregate": "DROP", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + 
"MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests that were sent to upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "log_filter", + "instance_group" + ], + "name": "nginx.upstream.request.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests that failed while proxying.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.request.failed", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "error_log", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of upstream responses buffered to disk.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + 
"instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.response.buffered", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "error_log", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of responses that failed while proxying.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.response.failed", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "error_log", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "average", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "Average length of the responses obtained from the upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "log_filter", + "instance_group" + ], + "name": "nginx.upstream.response.length", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + 
"apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Time spent on receiving responses from upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.response.time", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Time spent on receiving responses from upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.response.time.count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Time spent on receiving responses from upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": 
"nginx.upstream.response.time.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Time spent on receiving responses from upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.response.time.median", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Time spent on receiving responses from upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.response.time.pctl95", + "rate": "", + "rollup_aggregate": "AVG", + "source": "access_custom", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + 
"compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of responses from upstream servers with HTTP status codes 1xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.status.1xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of responses from upstream servers with HTTP status codes 2xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.status.2xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of responses from upstream servers with HTTP status codes 3xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": 
"nginx.upstream.status.3xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of responses from upstream servers with HTTP status codes 4xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.status.4xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of responses from upstream servers with HTTP status codes 5xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.upstream.status.5xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "access_custom", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + 
"apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Number of NGINX worker processes observed.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.workers.count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "percent", + "description": "CPU utilization percentage observed for NGINX worker processes.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.workers.cpu.system", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "percent", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "percent", + "description": "CPU utilization percentage observed for NGINX worker processes.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.workers.cpu.total", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", 
+ "type": "gauge", + "unit": "percent", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "percent", + "description": "CPU utilization percentage observed for NGINX worker processes.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.workers.cpu.user", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "percent", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Number of file descriptors utilized by NGINX worker processes.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.workers.fds_count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "RATE", + "default_unit": "disk_io", + "description": "Number of kilobytes read 
from disk by NGINX worker processes.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.workers.io.kbs_r", + "rate": "disk_io_rate", + "rollup_aggregate": "DROP", + "source": "psutil", + "type": "gauge", + "unit": "kilobytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "RATE", + "default_unit": "disk_io", + "description": "Number of kilobytes written to disk by NGINX worker processes.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.workers.io.kbs_w", + "rate": "disk_io_rate", + "rollup_aggregate": "DROP", + "source": "psutil", + "type": "gauge", + "unit": "kilobytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "Memory utilized by NGINX worker processes.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.workers.mem.rss", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", 
+ "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "percent", + "description": "Memory utilization percentage for NGINX worker processes.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.workers.mem.rss_pct", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "percent", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "Memory utilized by NGINX worker processes.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.workers.mem.vms", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Hard limit on the number of file descriptors as seen by NGINX worker processes.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + 
"root_uuid", + "published_api", + "instance_group" + ], + "name": "nginx.workers.rlimit_nofile", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "bytes_usage", + "description": "The response was fetched from the origin server instead of served from the cache because the request matched a proxy_cache_bypass directive. The response might then have been cached.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.bypass.bytes", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "bytes_usage", + "description": "The response was fetched from the origin server instead of served from the cache because the request matched a proxy_cache_bypass directive. 
The response might then have been cached.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.bypass.bytes_written", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "The response was fetched from the origin server instead of served from the cache because the request matched a proxy_cache_bypass directive. The response might then have been cached.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.bypass.responses", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "bytes_usage", + "description": "The response was fetched from the origin server instead of served from the cache because the request matched a proxy_cache_bypass directive. 
The response might then have been cached.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.bypass.responses_written", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "bytes_usage", + "description": "The entry in the cache has expired. The response contains fresh content from the origin server.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.expired.bytes", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "bytes_usage", + "description": "The entry in the cache has expired. 
The response contains fresh content from the origin server.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.expired.bytes_written", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "The entry in the cache has expired. The response contains fresh content from the origin server.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.expired.responses", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "bytes_usage", + "description": "The entry in the cache has expired. 
The response contains fresh content from the origin server.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.expired.responses_written", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "bytes_usage", + "description": "The response contains valid, fresh content direct from the cache.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.hit.bytes", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "The response contains valid, fresh content direct from the cache.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.hit.responses", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": 
"integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "bytes_usage", + "description": "Statistics about NGINX Plus cache usage.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.max_size", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "bytes_usage", + "description": "The response was not found in the cache and so was fetched from an origin server. 
The response might then have been cached.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.miss.bytes", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "The response was not found in the cache and so was fetched from an origin server. The response might then have been cached.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.miss.responses", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "bytes_usage", + "description": "The proxy_cache_revalidate directive was enabled and NGINX verified that the current cached content was still valid.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.revalidated.bytes", + "rate": "", + 
"rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "The proxy_cache_revalidate directive was enabled and NGINX verified that the current cached content was still valid.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.revalidated.responses", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "Statistics about NGINX Plus cache usage.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.size", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "bytes_usage", + 
"description": "The content is stale because the origin server is not responding correctly, and proxy_cache_use_stale was configured.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.stale.bytes", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "The content is stale because the origin server is not responding correctly, and proxy_cache_use_stale was configured.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.stale.responses", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "bytes_usage", + "description": " The content is stale because the entry is currently being updated in response to a previous request, and proxy_cache_use_stale updating is configured.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + 
"environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.updating.bytes", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": " The content is stale because the entry is currently being updated in response to a previous request, and proxy_cache_use_stale updating is configured.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "environment", + "app", + "component", + "cache_zone" + ], + "name": "plus.cache.updating.responses", + "rate": "", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "status_zone", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "RATE", + "default_unit": "net_io", + "description": "Number of bytes received from clients.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.http.request.bytes_rcvd", + "rate": "net_io_rate", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + 
"categories": [ + "nginx", + "status_zone", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "RATE", + "default_unit": "net_io", + "description": "Number of bytes sent to clients.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.http.request.bytes_sent", + "rate": "net_io_rate", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "status_zone", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of client requests received.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.http.request.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "status_zone", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of responses sent to clients.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.http.response.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": 
"gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Total number of failed SSL handshakes.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.http.ssl.failed", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Total number of successful SSL handshakes.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.http.ssl.handshakes", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Total number of session reuses during SSL handshake.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.http.ssl.reuses", + 
"rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "status_zone", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of responses with status codes 1xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.http.status.1xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "status_zone", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of responses with status codes 2xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.http.status.2xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "status_zone", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of responses with status codes 3xx.", + 
"dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.http.status.3xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "status_zone", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of responses with status codes 4xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.http.status.4xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "status_zone", + "apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of responses with status codes 5xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.http.status.5xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "status_zone", + 
"apimgmt_entry_point" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests completed without sending a response.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.http.status.discarded", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "cache" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of processes respawned.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.proc.respawned", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "slab" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Current number of free memory pages.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.slab.pages.free", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + 
"MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "slab" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Percentage of free pages.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.slab.pages.pct_used", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "percent", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "slab" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Sum of free and used memory pages above.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.slab.pages.total", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "slab" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Current number of used memory pages.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.slab.pages.used", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + {
+ "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "RATE", + "default_unit": "net_io", + "description": "Number of bytes received from clients.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.bytes_rcvd", + "rate": "net_io_rate", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "RATE", + "default_unit": "net_io", + "description": "Number of bytes sent to clients.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.bytes_sent", + "rate": "net_io_rate", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Total number of connections accepted from clients.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api" + ], + "name": "plus.stream.conn.accepted", + "rate": "double", + "rollup_aggregate": "DROP", + 
"source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "stream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Current number of client connections that are currently being processed.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.conn.active", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Total number of connections completed without creating a session.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.discarded", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" 
+ ], + "name": "plus.stream.status.2xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.status.4xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.status.5xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "RATE", + "default_unit": "net_io", + "description": "Number of bytes received from upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + 
], + "name": "plus.stream.upstream.bytes_rcvd", + "rate": "net_io_rate", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "RATE", + "default_unit": "net_io", + "description": "Number of bytes sent to upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.bytes_sent", + "rate": "net_io_rate", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Current number of connections.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.conn.active", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Total number of client connections forwarded to this 
server.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.conn.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to connect to an upstream server.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.conn.time", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Average time to connect to an upstream server.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.conn.time.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "stream_upstream" + ], + 
"compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to connect to an upstream server.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.conn.time.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to connect to an upstream server.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.conn.time.median", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to connect to an upstream server.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.conn.time.pctl95", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + 
"aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to receive the first byte of data.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.conn.ttfb", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Average time to receive the first byte of data.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.conn.ttfb.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to receive the first byte of data.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.conn.ttfb.max", + 
"rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to receive the first byte of data.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.conn.ttfb.median", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to receive the first byte of data.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.conn.ttfb.pctl95", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of unsuccessful attempts to communicate with upstream servers.", + "dimensions": [ + "local_id", + 
"parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.fails.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of performed health check requests.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.health.checks", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of failed health checks.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.health.fails", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": 
false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of times the upstream servers became unhealthy.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.health.unhealthy", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Current number of live (\"up\") upstream servers in an upstream group.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.peers.count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to receive the last byte of data.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.response.time", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ 
+ "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Average time to receive the last byte of data.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.response.time.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to receive the last byte of data.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.response.time.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to receive the last byte of data.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group", + "instance_group" + ], + "name": 
"plus.stream.upstream.response.time.median", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to receive the last byte of data.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.response.time.pctl95", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of times upstream servers became unavailable for client requests.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.unavail.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "stream_upstream" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Current number of servers 
removed from the group but still processing active client connections.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.stream.upstream.zombies", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "RATE", + "default_unit": "net_io", + "description": "Number of bytes received from the upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.bytes_rcvd", + "rate": "net_io_rate", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "RATE", + "default_unit": "net_io", + "description": "Number of bytes sent to the upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.bytes_sent", + "rate": "net_io_rate", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", 
+ "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Current number of active connections to the upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.conn.active", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Сurrent number of idle keepalive connections.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.conn.keepalive", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of unsuccessful attempts to communicate with upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": 
"plus.upstream.fails.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to get the response header from the upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.header.time", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Average time to get the response header from the upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.header.time.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + 
"description": "Average time to get the response header from the upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.header.time.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to get the response header from the upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.header.time.median", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to get the response header from the upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.header.time.pctl95", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + 
"MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of performed health check requests.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.health.checks", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of health failed check requests.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.health.fails", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of times the upstream servers became unhealthy.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": 
"plus.upstream.health.unhealthy", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Current number of live (\"up\") upstream servers in an upstream group. If graphed/monitored without specifying an upstream, it's the current number of all live upstream servers in all upstream groups.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.peer.count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of requests rejected due to queue overflows.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.queue.overflows", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "upstream", + 
"apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Current number of queued requests.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.queue.size", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of client requests forwarded to the upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.request.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of client responses obtained from the upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.response.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": 
"gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to get the full response from the upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.response.time", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "counter", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Average time to get the full response from the upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.response.time.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "counter", + "unit": "integer", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to get the full response from the upstream servers.", + "dimensions": [ + "local_id", + 
"parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.response.time.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to get the full response from the upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.response.time.median", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "timer", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "time_s", + "description": "Average time to get the full response from the upstream servers.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.response.time.pctl95", + "rate": "", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "seconds", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + 
"upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of responses from the upstream servers with status codes 1xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.status.1xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of responses from the upstream servers with status codes 2xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.status.2xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of responses from the upstream servers with status codes 3xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.status.3xx", + "rate": "double", + 
"rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of responses from the upstream servers with status codes 4xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.status.4xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of responses from the upstream servers with status codes 5xx.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.status.5xx", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of 
times upstream servers became unavailable for client requests.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.unavail.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "nginx", + "upstream", + "apimgmt_upstream_group" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Current number of servers removed from the group but still processing.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "plus.upstream.zombies", + "rate": "double", + "rollup_aggregate": "AVG", + "source": "plus_status", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "percent", + "description": "System CPU utilization.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.cpu.idle", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "percent", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + 
"container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "percent", + "description": "System CPU utilization.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.cpu.iowait", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "percent", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "percent", + "description": "System CPU stolen. Represents time when the real CPU was not available to the current VM.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.cpu.stolen", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "percent", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "percent", + "description": "System CPU utilization.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.cpu.system", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "percent", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], 
+ "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "percent", + "description": "System CPU utilization.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.cpu.user", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "percent", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "System disk usage statistics.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "file_path", + "instance_group" + ], + "name": "system.disk.free", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "percent", + "description": "System disk usage statistics, percentage.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "file_path", + "instance_group" + ], + "name": "system.disk.in_use", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "percent", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + 
"MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "System disk usage statistics.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "file_path", + "instance_group" + ], + "name": "system.disk.total", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "System disk usage statistics.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "file_path", + "instance_group" + ], + "name": "system.disk.used", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "RATE", + "default_unit": "integer", + "description": "Number of reads per sampling window.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "mount_point", + "instance_group" + ], + "name": "system.io.iops_r", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "psutil", + "type": "gauge", + "unit": "integer", 
+ "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "RATE", + "default_unit": "integer", + "description": "Number of writes per sampling window.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "mount_point", + "instance_group" + ], + "name": "system.io.iops_w", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "psutil", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "DROP", + "default_unit": "disk_io", + "description": "Number of kilobytes read.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "mount_point", + "instance_group" + ], + "name": "system.io.kbs_r", + "rate": "disk_io_rate", + "rollup_aggregate": "RATE", + "source": "psutil", + "type": "gauge", + "unit": "kilobytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "RATE", + "default_unit": "disk_io", + "description": "Number of kilobytes written.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "mount_point", + "instance_group" + ], + "name": 
"system.io.kbs_w", + "rate": "disk_io_rate", + "rollup_aggregate": "DROP", + "source": "psutil", + "type": "gauge", + "unit": "kilobytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "time_ms", + "description": "Time spent reading from disk.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "mount_point", + "instance_group" + ], + "name": "system.io.wait_r", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "time_ms", + "description": "Time spent writing to disk.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "mount_point", + "instance_group" + ], + "name": "system.io.wait_w", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "la", + "description": "Number of processes in the system run queue, averaged over the last 1 min.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + 
"alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.load.1", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "float", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "la", + "description": "Number of processes in the system run queue, averaged over the last 15 min.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.load.15", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "float", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "la", + "description": "Number of processes in the system run queue, averaged over the last 5 min.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.load.5", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "float", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "Statistics about system 
memory usage.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.mem.available", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "Statistics about system memory usage.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.mem.buffered", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "Statistics about system memory usage.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.mem.cached", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": 
"bytes_usage", + "description": "Statistics about system memory usage.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.mem.free", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "percent", + "description": "Statistics about system memory usage, percentage.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.mem.pct_used", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "percent", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": false, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "Statistics about system memory usage.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.mem.shared", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": 
true, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "Statistics about system memory usage.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.mem.total", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "Statistics about system memory usage.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.mem.used", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "Statistics about system memory usage.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.mem.used.all", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "system", + "container" + 
], + "compound_payload": "false", + "counted": true, + "default_rollup": "RATE", + "default_unit": "net_io", + "description": "Network I/O statistics. Number of bytes received per network interface.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "network_interface", + "instance_group" + ], + "name": "system.net.bytes_rcvd", + "rate": "net_io_rate", + "rollup_aggregate": "DROP", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "RATE", + "default_unit": "net_io", + "description": "Network I/O statistics. Number of bytes sent per network interface.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "network_interface", + "instance_group" + ], + "name": "system.net.bytes_sent", + "rate": "net_io_rate", + "rollup_aggregate": "DROP", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Network I/O statistics. 
Total number of inbound packets dropped per network interface.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "network_interface", + "instance_group" + ], + "name": "system.net.drops_in.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "psutil", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Network I/O statistics. Total number of outbound packets dropped per network interface.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "network_interface", + "instance_group" + ], + "name": "system.net.drops_out.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "psutil", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Number of times the listen queue of a socket overflowed.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.net.listen_overflows", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "psutil", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + 
"SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Network I/O statistics. Number of packets received per network interface.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "network_interface", + "instance_group" + ], + "name": "system.net.packets_in.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "psutil", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Network I/O statistics. Total number of errors while receiving per network interface.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "network_interface", + "instance_group" + ], + "name": "system.net.packets_in.error", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "psutil", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Network I/O statistics. 
Number of packets sent per network interface.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "network_interface", + "instance_group" + ], + "name": "system.net.packets_out.count", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "psutil", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "counter", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "SUM", + "default_unit": "integer", + "description": "Network I/O statistics. Total number of errors while sending per network interface.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "network_interface", + "instance_group" + ], + "name": "system.net.packets_out.error", + "rate": "double", + "rollup_aggregate": "DROP", + "source": "psutil", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "System swap memory statistics.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.swap.free", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", 
+ "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "percent", + "description": "System swap memory statistics, percentage.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.swap.pct_free", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "percent", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "System swap memory statistics.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.swap.total", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "system", + "container" + ], + "compound_payload": "false", + "counted": true, + "default_rollup": "AVG", + "default_unit": "bytes_usage", + "description": "System swap memory statistics.", + "dimensions": [ + "local_id", + "parent_hostname", + "instance.tags", + "alias", + "instance", + "location", + "root_uuid", + "published_api", + "instance_group" + ], + "name": "system.swap.used", + "rate": "", + "rollup_aggregate": "AVG", + "source": "psutil", + "type": "gauge", + "unit": "bytes", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" 
+ ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Hit count for upstream.header.response.latency calculation", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "upstream.header.latency.count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Maximal time it took the upstream to create response with headers only.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + 
"waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "upstream.header.latency.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Minimal time it took the upstream to create response with headers only.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "upstream.header.latency.min", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Total time it took the upstream to create response with headers only.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + 
"upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "upstream.header.latency.total", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Average value for total upstream latencies.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "upstream.latency.avg", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + 
"backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "The number of requests for which the total upstream latency was calculated.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "upstream.latency.count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "integer", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Maximal value among total latencies on upstream.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + 
"waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "upstream.latency.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Minimal value among total latencies on upstream.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "upstream.latency.min", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Total upstream latency value calculated as a sum of network and response latencies for the upstream.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + 
"component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "upstream.latency.total", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": false + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Hit count for upstream.network.latency calculation.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "upstream.network.latency.count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + 
"application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Maximal network latency on upstream.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "upstream.network.latency.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Minimal network latency on upstream.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + 
"name": "upstream.network.latency.min", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Total network latency on upstream.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "upstream.network.latency.total", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Hit count for upstream.response.latency calculation.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + 
"http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "upstream.response.latency.count", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "integer", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Maximal time it took the upstream to create response.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol" + ], + "name": "upstream.response.latency.max", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "gauge", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Minimal time it 
took the upstream to create response.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "upstream.response.latency.min", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": "gauge", + "unit": "milliseconds", + "visible": true + }, + { + "agent_type": "gauge", + "aggregations": [ + "MIN", + "MAX", + "SUM", + "AVG", + "COUNT", + "RATE" + ], + "backend_type": "incremental", + "categories": [ + "application" + ], + "compound_payload": "true", + "counted": false, + "default_rollup": "AVG", + "default_unit": "integer", + "description": "Total time it took the upstream to create response.", + "dimensions": [ + "local_id", + "http.response_code", + "request_outcome", + "request_outcome_reason", + "upstream_response_code", + "environment", + "app", + "component", + "aggregation_duration", + "country_code", + "http.upstream_addr", + "http.uri", + "http.request_method", + "http.hostname", + "http.version_schema", + "root_uuid", + "parent_hostname", + "instance.tags", + "alias", + "published_api", + "location", + "instance", + "gateway", + "waf.violation_subviolations", + "waf.signature_ids", + "waf.attack_types", + "waf.violation_rating", + "waf.violations", + "family", + "upstream_addr", + "proxied_protocol", + "instance_group" + ], + "name": "upstream.response.latency.total", + "rate": "", + "rollup_aggregate": "AVG", + "source": "avrd", + "type": 
"gauge", + "unit": "milliseconds", + "visible": true + } + ] + \ No newline at end of file diff --git a/build.sh b/build.sh new file mode 100755 index 000000000..f69629964 --- /dev/null +++ b/build.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -xeuo pipefail + +SITE_NAME="$SITE_NAME" + +if [[ -z "${SITE_NAME}" ]] ; then + echo "No SITE_NAME variable set. Unable to continue." + exit 1 ; + +elif [[ "${SITE_NAME}" == 'docs-nginx-com' ]] ; then + make all + ret=$? + echo "Command exited with $ret" ; +elif [[ "${SITE_NAME}" == 'docs-staging-nginx' ]] ; then + make all-staging + ret=$? + echo "Command exited with $ret" ; +elif [[ "${SITE_NAME}" == 'docs-dev-nginx' ]] ; then + make all-dev + ret=$? + echo "Command exited with $ret" ; +else + echo "Not running in production context on docs, docs-staging, or docs-dev. Check the build settings for the current Netlify context: $CONTEXT" + ret="1" + echo "Command exited with $ret" ; +fi \ No newline at end of file diff --git a/cloudcannon.config.yml b/cloudcannon.config.yml new file mode 100644 index 000000000..a27bef759 --- /dev/null +++ b/cloudcannon.config.yml @@ -0,0 +1,815 @@ +paths: + collections: content +collections_config: + pages: + parse_branch_index: true + path: / + standalone_pages: + path: content/ + output: true + parse_branch_index: false + filter: + base: strict + include: + - feedback.md + - ossc.md + - search.md + - success.md + name: Standalone Website Pages + description: This collection contains standalone website pages like the search landing page and feedback form. + icon: notes + sort_options: + - key: weight + order: ascending + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + disable_add: true + disable_add_folder: false + disable_file_actions: false + schemas: {} + nginx_plus: + path: content/nginx + name: NGINX Plus + description: Documentation for NGINX and NGINX Plus. 
+ output: true + parse_branch_index: false + icon: notes + preview: + metadata: + - text: + - key: path + icon: folder + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + create: + path: '[relative_base_path]/{directory}/{filename|slugify}.[ext]' + extra_data: {} + _inputs: {} + _select_data: {} + _structures: {} + disable_add: false + disable_add_folder: false + disable_file_actions: false + schemas: + default: + path: .cloudcannon/schemas/default.md + reorder_inputs: true + hide_extra_inputs: true + remove_empty_inputs: false + remove_extra_inputs: false + name: Task (default) + icon: task + preview: {} + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + create: + path: '[relative_base_path]/{directory}/{filename|slugify}.[ext]' + extra_data: {} + _inputs: {} + _select_data: {} + _structures: {} + concept: + path: .cloudcannon/schemas/concept.md + reorder_inputs: true + hide_extra_inputs: true + remove_empty_inputs: false + remove_extra_inputs: false + name: Concept + icon: lightbulb + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + nap_dos: + path: content/nap-dos + output: true + name: NGINX App Protect DoS + description: Documentation for NGINX App Protect DoS + parse_branch_index: false + icon: notes + preview: + metadata: + - text: + - key: path + icon: folder + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + disable_add: false + disable_add_folder: false + disable_file_actions: false + schemas: + default: + path: .cloudcannon/schemas/default.md + reorder_inputs: true + hide_extra_inputs: true + remove_empty_inputs: false + remove_extra_inputs: false + name: Task (default) + icon: task + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + concept: + path: .cloudcannon/schemas/concept.md + reorder_inputs: true + hide_extra_inputs: true + remove_empty_inputs: false + remove_extra_inputs: false + name: Concept + icon: lightbulb + _editables: {} + _inputs: {} + 
_select_data: {} + _structures: {} + nap_waf: + path: content/nap-waf + output: true + name: NGINX App Protect WAF + description: Documentation for NGINX App Protect WAF. + parse_branch_index: false + icon: notes + preview: + metadata: + - text: + - key: path + icon: folder + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + create: + path: '[relative_base_path]/{directory}/{filename|slugify}.[ext]' + extra_data: {} + _inputs: {} + _select_data: {} + _structures: {} + disable_add: false + disable_add_folder: false + disable_file_actions: false + schemas: + default: + path: .cloudcannon/schemas/default.md + reorder_inputs: true + hide_extra_inputs: true + remove_empty_inputs: false + remove_extra_inputs: false + name: Task (default) + icon: task + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + concept: + path: .cloudcannon/schemas/concept.md + reorder_inputs: true + hide_extra_inputs: true + remove_empty_inputs: false + remove_extra_inputs: false + name: Concept + icon: lightbulb + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + nms: + path: content/nms + output: true + name: NGINX Management Suite + description: Documentation for NGINX Management Suite. 
+ parse_branch_index: false + icon: notes + preview: + metadata: + - text: + - key: path + icon: folder + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + create: + path: '[relative_base_path]/{directory}/{filename|slugify}.[ext]' + extra_data: {} + _inputs: {} + _select_data: {} + _structures: {} + disable_add: false + disable_add_folder: false + disable_file_actions: false + schemas: + default: + path: .cloudcannon/schemas/default.md + reorder_inputs: true + hide_extra_inputs: true + remove_empty_inputs: false + remove_extra_inputs: false + name: Task (default) + icon: task + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + concept: + path: .cloudcannon/schemas/concept.md + reorder_inputs: true + hide_extra_inputs: true + remove_empty_inputs: false + remove_extra_inputs: false + name: Concept + icon: lightbulb + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + policy: + path: .cloudcannon/schemas/nms/policy.md + reorder_inputs: true + hide_extra_inputs: true + remove_empty_inputs: false + remove_extra_inputs: false + name: ACM Policy + icon: table_chart + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + controller: + path: content/controller + output: true + name: NGINX Controller + description: Documentation for NGINX Controller. 
+ parse_branch_index: false + icon: notes + preview: + metadata: + - text: + - key: path + icon: folder + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + create: + path: '[relative_base_path]/{directory}/{filename|slugify}.[ext]' + extra_data: {} + _inputs: {} + _select_data: {} + _structures: {} + disable_add: false + disable_add_folder: true + disable_file_actions: true + schemas: + default: + path: .cloudcannon/schemas/default.md + reorder_inputs: true + hide_extra_inputs: true + remove_empty_inputs: false + remove_extra_inputs: false + name: Task (default) + icon: task + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + modsec_waf: + path: content/modsec-waf + output: true + name: NGINX ModSec WAF + description: Documentation for NGINX ModSec WAF. + parse_branch_index: false + icon: notes + preview: + metadata: + - text: + - key: path + icon: folder + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + disable_add: true + disable_add_folder: true + disable_file_actions: true + schemas: + default: + path: .cloudcannon/schemas/default.md + reorder_inputs: true + hide_extra_inputs: true + remove_empty_inputs: false + remove_extra_inputs: false + name: Task (default) + icon: task + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + solution_bundles: + path: content/solutions + output: true + name: NGINX Solution Bundles + description: Instructions for deploying the multi-product solution bundles. 
+ parse_branch_index: false + icon: notes + preview: + metadata: + - text: + - key: path + icon: folder + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + create: + path: '[relative_base_path]/{directory}/{filename|slugify}.[ext]' + extra_data: {} + _inputs: {} + _select_data: {} + _structures: {} + disable_add: false + disable_add_folder: false + disable_file_actions: false + schemas: + default: + path: .cloudcannon/schemas/default.md + reorder_inputs: true + hide_extra_inputs: true + remove_empty_inputs: false + remove_extra_inputs: false + name: Task (default) + icon: task + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + includes: + path: content/includes + output: false + name: Content Reuse + description: A collection of files that can be reused in other documents across the website. + filter: + exclude: + - index.md + parse_branch_index: false + icon: notes + preview: + metadata: + - text: + - key: path + icon: folder + _editables: {} + _inputs: {} + _select_data: {} + _structures: {} + create: + path: '[relative_base_path]/{directory}/{filename|slugify}.[ext]' + extra_data: {} + _inputs: {} + _select_data: {} + _structures: {} + disable_add: false + disable_add_folder: false + disable_file_actions: false + schemas: + default: + path: .cloudcannon/schemas/includes.md + name: Includes template + headless-collection: + path: .cloudcannon/schemas/headless-collection.md + name: Index.md template + data: + path: data + output: false + icon: dataset + _editables: {} + _enabled_editors: + - data + - content + _inputs: {} + _select_data: {} + _structures: {} + create: + path: '[relative_base_path]/{directory}/{filename|slugify}.[ext]' + extra_data: {} + _inputs: {} + _select_data: {} + _structures: {} + disable_add: false + disable_add_folder: true + disable_file_actions: true + schemas: {} +collections_config_override: true +collection_groups: + - heading: "Standalone Web Pages" + collections: + - "standalone_pages" + - 
heading: "Product Docs" + collections: + - "nginx_plus" + - "nap_dos" + - "nap_waf" + - "nms" + - "controller" + - "modsec_waf" + - heading: "Solutions" + collections: + - "solution_bundles" + - heading: "Content Reuse" + collections: + - "includes" +_enabled_editors: + - content +_inputs: {} +_select_data: {} +_structures: {} +_editables: + text: + bold: true + copyformatting: true + italic: true + link: true + redo: true + removeformat: true + strike: true + subscript: true + superscript: true + underline: true + undo: true +data_config: true +timezone: Etc/UTC +commit_templates: + - template_string: '{commit_type}: {message|trim} {breaking_change|if=breaking_change_message}' + _inputs: + commit_type: + type: select + options: + allow_empty: true + values: + - feature + - fix + - refactor + - update + cascade: true + breaking_change_message: + type: text + cascade: true + extra_data: + breaking_change: |- + + ⚠️ {breaking_change_message} +_snippets_imports: + hugo: + exclude: + - hugo_youtube + - hugo_twitter + - hugo_vimeo + - hugo_instagram +_snippets: + important: + template: hugo_paired_shortcode_named_args + inline: false + preview: + text: Important + subtext: + - key: content_markdown + - Important note in a callout box. + icon: priority_high + definitions: + shortcode_name: important + content_key: content_markdown + named_args: [] + note: + template: hugo_paired_shortcode_named_args + inline: false + preview: + text: Note + subtext: + - key: content_markdown + - Note in callout box. + icon: edit_note + definitions: + shortcode_name: note + content_key: content_markdown + named_args: [] + see-also: + template: hugo_paired_shortcode_named_args + inline: false + preview: + text: See also + subtext: + - key: content_markdown + - See Also note in callout box. 
+ icon: visibility + definitions: + shortcode_name: see-also + content_key: content_markdown + named_args: [] + before-you-begin: + template: hugo_paired_shortcode_named_args + inline: false + preview: + text: Befor You Begin + subtext: + - key: content_markdown + - Before You Begin note in callout box. + icon: front_hand + definitions: + shortcode_name: before-you-begin + content_key: content_markdown + named_args: [] + deprecated: + template: hugo_paired_shortcode_named_args + inline: false + preview: + text: Deprecated + subtext: + - key: content_markdown + - Deprecated callout in a box. + icon: delete + definitions: + shortcode_name: deprecated + content_key: content_markdown + named_args: [] + caution: + template: hugo_paired_shortcode_named_args + inline: false + preview: + text: Caution + subtext: + - key: content_markdown + - Caution callout in a box. + icon: announcement + definitions: + shortcode_name: caution + content_key: content_markdown + named_args: [] + warning: + template: hugo_paired_shortcode_named_args + inline: false + preview: + text: Warning + subtext: + - key: content_markdown + - Warning callout in a box. + icon: warning + definitions: + shortcode_name: warning + content_key: content_markdown + named_args: [] + call-out: + template: hugo_paired_shortcode_positional_args + inline: false + preview: + text: + - key: callout_title + - Callout + subtext: + - key: content_markdown + - Custome callout with title in a box. + icon: + - key: icon_param + - document + definitions: + shortcode_name: call-out + content_key: content_markdown + positional_args: + - editor_key: icon_param + type: string + - editor_key: callout_title + type: string + Internal comment: + template: hugo_paired_shortcode_named_args + inline: false + preview: + text: Comment (Internal only) + subtext: + - key: content_markdown + - Internal comment that will not be rendered in the page. 
+ icon: tag + definitions: + shortcode_name: comment + content_key: content_markdown + named_args: [] + include: + template: hugo_shortcode_positional_args + inline: true + preview: + text: Include + subtext: + - key: url + - Include snippet. + icon: file_copy + definitions: + shortcode_name: include + positional_args: + - editor_key: url + type: string + bootstrap-table: + template: hugo_paired_shortcode_positional_args + inline: false + preview: + text: Bootstrap Table + subtext: + - key: content_markdown_table + - type: code + - Markdown table using Bootstrap. + icon: table_chart + definitions: + shortcode_name: bootstrap-table + content_key: content_markdown_table + positional_args: + - editor_key: table_style + _inputs: + content_markdown_table: + type: code + cascade: true + raw-html: + template: hugo_paired_shortcode_named_args + inline: false + preview: + text: Raw HTML + subtext: + - key: content_code_block + - Raw HTML Code. + icon: html + definitions: + shortcode_name: raw-html + content_key: content_code_block + named_args: [] + img: + template: hugo_shortcode_named_args + inline: true + preview: + text: + - key: src + - Image + subtext: + - key: alt + - Alternative text + icon: image + definitions: + shortcode_name: img + named_args: + - editor_key: src + type: string + - editor_key: caption + optional: true + type: string + - editor_key: alt + optional: true + type: string + - editor_key: width + optional: true + type: string + - editor_key: height + optional: true + type: string + tab group: + template: hugo_paired_shortcode_named_args + inline: false + preview: + text: + - key: name + - Tab group section + subtext: + - key: content_code_block + - Tabs content + icon: tab + definitions: + shortcode_name: tabs + content_key: content_code_block + named_args: + - editor_key: name + type: string + tab section: + template: hugo_paired_markdown_shortcode_named_args + inline: false + preview: + text: + - key: name + - Tab inside tabbed section + subtext: + 
- key: tab_content + - Tab content + icon: tab + definitions: + shortcode_name: tabs + content_key: tab_content + named_args: + - editor_key: name + type: string + beta-badge: + template: hugo_shortcode_positional_args + inline: false + preview: + text: This topic documents an early access feature. + subtext: >- + These features are provided for you to try before they are generally + available. You shouldn't use early access features for production + purposes. + icon: science + definitions: + shortcode_name: beta-badge + relref: + template: hugo_shortcode_positional_args + inline: true + preview: + view: inline + icon: link + text: + - key: url + - Empty URL + definitions: + shortcode_name: relref + positional_args: + - editor_key: url + type: string + link: + template: hugo_shortcode_positional_args + inline: true + preview: + text: + - key: text + - Link to download a file + icon: system_update_alt + definitions: + shortcode_name: link + positional_args: + - editor_key: url + type: string + - editor_key: text + fontawesome-Icon: + template: hugo_shortcode_positional_args + inline: true + preview: + view: inline + text: FA-icon + icon: + - key: icon_name + definitions: + shortcode_name: fa + positional_args: + - editor_key: icon_name + type: string + - editor_key: styles + type: string + optional: true + remove_empty: true + icon-resolved: + template: hugo_markdown_shortcode_named_args + inline: true + preview: + text: Resolved Bug Icon + icon: bug_report + definitions: + shortcode_name: icon-resolved + content_key: content_markdown + named_args: [] + icon-bug: + template: hugo_markdown_shortcode_named_args + inline: true + preview: + text: Bug Icon + icon: bug_report + definitions: + shortcode_name: icon-bug + content_key: content_markdown + named_args: [] + shortversions: + template: hugo_shortcode_positional_args + inline: false + preview: + text: Shortversions - This shortcode has been deprecated + icon: format_list_numbered + definitions: + shortcode_name: 
shortversions + positional_args: + - editor_key: from + type: string + - editor_key: to + type: string + - editor_key: product + type: string + versions: + template: hugo_shortcode_positional_args + inline: false + preview: + text: Versions - This shortcode has been deprecated + icon: format_list_numbered + definitions: + shortcode_name: versions + positional_args: + - editor_key: from + type: string + - editor_key: to + type: string + - editor_key: product + type: string diff --git a/config/development/config.toml b/config/development/config.toml new file mode 100644 index 000000000..f19789597 --- /dev/null +++ b/config/development/config.toml @@ -0,0 +1,2 @@ +baseURL = "https://docs-dev.nginx.com" +title = "DEV -- docs-nginx-com" diff --git a/config/production/config.toml b/config/production/config.toml new file mode 100644 index 000000000..b367a4d63 --- /dev/null +++ b/config/production/config.toml @@ -0,0 +1,22 @@ +baseURL = "https://docs.nginx.com" +title = "NGINX Documentation" +enableGitInfo = true +[privacy] + [privacy.disqus] + disable = false + [privacy.googleAnalytics] + anonymizeIP = true + disable = false + respectDoNotTrack = true + useSessionStorage = false + [privacy.instagram] + disable = true + [privacy.twitter] + disable = false + enableDNT = true + simple = false + [privacy.vimeo] + disable = true + [privacy.youtube] + disable = false + privacyEnhanced = true diff --git a/config/staging/config.toml b/config/staging/config.toml new file mode 100644 index 000000000..2cf38eda5 --- /dev/null +++ b/config/staging/config.toml @@ -0,0 +1,2 @@ +baseURL = "https://docs-staging.nginx.com" +title = "STAGING - docs-nginx-com" diff --git a/content/_index.md b/content/_index.md new file mode 100644 index 000000000..d6816edad --- /dev/null +++ b/content/_index.md @@ -0,0 +1,4 @@ +--- +title: NGINX Product Documentation +description: Learn how to deliver, manage, and protect your applications using F5 NGINX products. 
+--- \ No newline at end of file diff --git a/content/amplify/_index.md b/content/amplify/_index.md new file mode 100644 index 000000000..f93c4b15c --- /dev/null +++ b/content/amplify/_index.md @@ -0,0 +1,8 @@ +--- +title: NGINX Amplify +description: Lightweight SaaS monitoring and static analysis for NGINX Open Source and F5 NGINX Plus. +url: /nginx-amplify/ +cascade: + logo: "NGINX-Amplify-product-icon-RGB.svg" +--- + diff --git a/content/amplify/additional-resources.md b/content/amplify/additional-resources.md new file mode 100644 index 000000000..301f7c13c --- /dev/null +++ b/content/amplify/additional-resources.md @@ -0,0 +1,12 @@ +--- +title: Other NGINX Amplify Resources +weight: 899 +toc: true +tags: ["docs"] +docs: "DOCS-977" +--- + +If you are interested in learning more about F5 NGINX Amplify, visit the [NGINX Amplify Resources Page](https://amplify.nginx.com/docs/), where you will find links to: + +- The Amplify Blog +- Amplify Videos \ No newline at end of file diff --git a/content/amplify/changelog.md b/content/amplify/changelog.md new file mode 100644 index 000000000..9b876ca73 --- /dev/null +++ b/content/amplify/changelog.md @@ -0,0 +1,42 @@ +--- +title: "Changelog" +description: "These release notes list and describe the new features, enhancements, and resolved issues in F5 NGINX Amplify" +weight: 900 +toc: true +--- + +## Sept 21, 2023 + +### What's New + +- {{% icon-feature %}} **Removal of F5 NGINX Amplify Beta User Interface** + + The NGINX Amplify Beta User Interface is no longer available. We're shifting our focus to concentrate on core initiatives. A big thank you to all who tested the interface or shared their thoughts. + + +## July 26, 2023 + +### What's New + +- {{% icon-feature %}} **Configure Slack Notifications in Beta UI and other bug fixes** + + The Beta UI now supports adding Slack channels when configuring notifications in the Settings -> Notifications page. 
(Issue 907) + + A number of cosmetic and performance related bugs have also been fixed. + +## July 14th, 2023 + + ### What's New + +This release includes the following updates: + +- {{% icon-feature %}} **NGINX Amplify Beta User Interface** + + The new Amplify user interface is in beta! To enable it, you can select the **Try New UI** button in the upper right corner of your browser. Please give it a try and let us know what you think by selecting **Give Feedback** in the upper right corner of your browser window. + + If you want to revert to the classic user interface, you can navigate to the Settings page by selecting the gear icon in the upper right corner of your browser and selecting **Switch to Classic UI**. + + +### Known Issues + +- You can find information about known issues with NGINX Amplify in the [Known Issues]({{< relref "/amplify/known-issues.md" >}}) topic. diff --git a/content/amplify/faq/_index.md b/content/amplify/faq/_index.md new file mode 100644 index 000000000..91616df43 --- /dev/null +++ b/content/amplify/faq/_index.md @@ -0,0 +1,5 @@ +--- +title: Frequently Asked Questions +weight: 800 +url: /nginx-amplify/faq/ +--- diff --git a/content/amplify/faq/general.md b/content/amplify/faq/general.md new file mode 100644 index 000000000..96f884a63 --- /dev/null +++ b/content/amplify/faq/general.md @@ -0,0 +1,37 @@ +--- +title: General +description: General questions about F5 NGINX Amplify +weight: 10 +toc: true +tags: ["docs"] +docs: "DOCS-956" +--- + + +### What Is F5 NGINX Amplify? + +NGINX Amplify is a tool for comprehensive NGINX monitoring. With NGINX Amplify it's easy to proactively analyze and fix problems related to running and scaling NGINX-based web applications. 
+ +You can use NGINX Amplify to do the following: + + * Visualize and identify NGINX performance bottlenecks, overloaded servers, or potential DDoS attacks + * Improve and optimize NGINX performance with intelligent advice and recommendations + * Get notified when something is wrong with the application infrastructure + * Plan web application capacity and performance + * Keep track of the systems running NGINX + +### Where Is NGINX Amplify Hosted? + +NGINX Amplify is a SaaS and it is currently hosted in [AWS us-west-1](http://docs.aws.amazon.com/general/latest/gr/rande.html) (US West, N. California). + +### Is the NGINX Amplify Agent Traffic Protected? + +All communications between NGINX Amplify Agent and the backend are done securely over SSL/TLS. NGINX Amplify Agent always initiates all traffic. The backend system doesn't set up any connections back to NGINX Amplify Agent. + +### Is the NGINX Amplify Agent Code Publicly Available? + +NGINX Amplify Agent is an open-source application. It is licensed under the [2-clause BSD license](https://github.com/nginxinc/nginx-amplify-agent/blob/master/LICENSE), and the code is available in [NGINX Amplify's GitHub repository](https://github.com/nginxinc/nginx-amplify-agent). + +### What is This Question About My Password When Installing NGINX Amplify Agent? + +It could be that you're starting the install script from a non-root account. In this case, you will need *sudo* rights. While it depends on a particular system configuration, with a non-root account *sudo* will typically ask for a password. 
diff --git a/content/amplify/faq/metrics-and-metadata.md b/content/amplify/faq/metrics-and-metadata.md new file mode 100644 index 000000000..701c98376 --- /dev/null +++ b/content/amplify/faq/metrics-and-metadata.md @@ -0,0 +1,14 @@ +--- +title: NGINX Amplify Metrics and Metadata +description: Questions about F5 NGINX Amplify's Metrics and Metadata +weight: 40 +toc: true +tags: ["docs"] +docs: "DOCS-957" +--- + +### What Data Does F5 NGINX Amplify Agent Gather? + +[NGINX Amplify Agent Metrics and Metadata]({{< relref "/amplify/nginx-amplify-agent/metadata-metrics-collection" >}}) + +{{< note >}}For a complete list of metrics, refer to the [Metrics and Metadata documentation]({{< relref "/amplify/metrics-metadata" >}}).{{< /note >}} \ No newline at end of file diff --git a/content/amplify/faq/nginx-amplify-agent.md b/content/amplify/faq/nginx-amplify-agent.md new file mode 100644 index 000000000..a3a1c620e --- /dev/null +++ b/content/amplify/faq/nginx-amplify-agent.md @@ -0,0 +1,176 @@ +--- +title: NGINX Amplify Agent +description: Questions about F5 NGINX Amplify Agent +weight: 20 +toc: true +tags: ["docs"] +docs: "DOCS-958" +--- + +### What Operating Systems are Supported? 
+ +The F5 NGINX Amplify Agent is currently officially packaged and supported for the following Linux flavors only: + + * Ubuntu 22.04 "jammy" (amd64/arm64) + * Ubuntu 20.04 "focal" (amd64/arm64) + * Ubuntu 18.04 "bionic" (amd64/arm64) + * Debian 12 "bookworm" (amd64/arm64) + * Debian 11 "bullseye" (amd64/arm64) + * Debian 10 "buster" (amd64/arm64) + * RHEL/CentOS/OEL 9 (amd64/arm64) + * Amazon Linux 2 LTS (amd64/arm64) + +The following platforms are no longer supported but still can be used with older agent packages powered by Python 3: + + * RHEL/CentOS/OEL 8 (amd64/arm64) + +The following platforms are no longer supported but still can be used with older agent packages powered by Python 2: + + * Ubuntu 16.04 "xenial" (i386/amd64/arm64) + * Debian 9 "stretch" (i386/amd64) + * RHEL/CentOS/OEL 6 (i386/amd64) + * RHEL/CentOS/OEL 7 (amd64) + * Amazon Linux (amd64) + +Other OS and distributions below are not fully supported yet (and no agent packages are available), however you can grab a specialized install script [here](https://raw.githubusercontent.com/nginxinc/nginx-amplify-agent/master/packages/install-source.sh) and see if it works for you. Run [install-source.sh](https://raw.githubusercontent.com/nginxinc/nginx-amplify-agent/master/packages/install-source.sh) (as root) instead of *install.sh* and follow the dialog. You can copy the API key from the Amplify UI (find it in the **Settings** or in the **New System** pop-up). + + * FreeBSD + * SLES + * Alpine + * Fedora + +Feel free to [submit](https://github.com/nginxinc/nginx-amplify-agent/) an issue or a PR if you find something that has to be fixed. + +We also have an experimental Ebuild for Gentoo. + +### What Version of Python is Required? + +NGINX Amplify Agent starting from version 1.8.0, works with Python >= 3.6. + +Previous versions were powered by Python 2.6 and 2.7, depending on the target platform. + +### How Do I Start to Monitor My Systems with NGINX Amplify? + +1. Download and run the install script. 
+ + ```bash + curl -sS -L -O \ + https://github.com/nginxinc/nginx-amplify-agent/raw/master/packages/install.sh && \ + API_KEY='YOUR_API_KEY' sh ./install.sh + ``` + + where YOUR_API_KEY is a unique API key assigned when you create an account with NGINX Amplify. You can also find the API key in the **Account** menu. + +2. Verify that NGINX Amplify Agent has started. + + ```bash + ps ax | grep -i 'amplify\-' + 2552 ? S 0:00 amplify-agent + ``` + +For manual installation, please check the [user guide]({{< relref "/amplify/nginx-amplify-agent/install/installing-amplify-agent#installing-the-agent-manually" >}}). + +### What Do I Need to Configure NGINX Amplify Agent to Report Metrics Correctly? + +Once you install NGINX Amplify Agent, it will automatically begin sending metrics. You can expect to see real-time metrics in the Amplify web interface within about a minute. + +If you don't see the new system or NGINX in the web interface, or (some) metrics aren't being collected, please check the following: + +1. The NGINX Amplify Agent package has been successfully [installed]({{< relref "/amplify/nginx-amplify-agent/install/installing-amplify-agent.md" >}}), and no warnings were shown during the installation. + +2. The `amplify-agent` process is running and updating its [log file]({{< relref "/amplify/nginx-amplify-agent/install/configuring-amplify-agent#agent-logfile" >}}). + +3. NGINX Amplify Agent is running under the same user as your NGINX worker processes. + +4. The NGINX instance is started with an absolute path. Currently, NGINX Amplify Agent **can't** detect NGINX instances launched with a relative path (e.g., "./nginx"). + +5. The [user ID that is used by NGINX Amplify Agent and NGINX ]({{< relref "/amplify/nginx-amplify-agent/install/configuring-amplify-agent#overriding-the-effective-user-id" >}}), can run *ps(1)* to see all system processes. 
If *ps(1)* is restricted for non-privileged users, NGINX Amplify Agent won't be able to find and properly detect the NGINX master process. + +6. The time is set correctly. If the time on the system where NGINX Amplify Agent runs is ahead or behind the world's clock, you won't be able to see the graphs. + +7. *stub_status* is [properly configured]({{< relref "/amplify/nginx-amplify-agent/configuring-metric-collection" >}}), and the *stub_status module* is included in the NGINX build (this can be checked with `nginx -V`). + +8. NGINX [access.log](http://nginx.org/en/docs/http/ngx_http_log_module.html) and [error.log](http://nginx.org/en/docs/ngx_core_module.html#error_log) files are readable by the user `nginx` (or by the [user](http://nginx.org/en/docs/ngx_core_module.html#user) set in NGINX config). + +9. All NGINX configuration files are readable by the NGINX Amplify Agent user ID (check owner, group, and permissions). + +10. Extra [configuration steps have been performed as required]({{< relref "/amplify/metrics-metadata/nginx-metrics#additional-nginx-metrics" >}}) for the additional metrics to be collected. + +11. The system DNS resolver is correctly configured, and *receiver.amplify.nginx.com* can be successfully resolved. + +12. Outbound TLS/SSL from the system to *receiver.amplify.nginx.com* is not restricted. This can be checked with *curl(1)*. [Configure a proxy server]({{< relref "/amplify/nginx-amplify-agent/install/configuring-amplify-agent#setting-up-a-proxy" >}}) for NGINX Amplify Agent if required. + +13. *selinux(8)*, *apparmor(7)* or [grsecurity](https://grsecurity.net) are not interfering with the metric collection. E.g. for *selinux(8)* check **/etc/selinux/config**, try `setenforce 0` temporarily and see if it improves the situation for certain metrics. + +14. Some VPS providers use hardened Linux kernels that may restrict non-root users from accessing */proc* and */sys*. Metrics describing system and NGINX disk I/O are usually affected. 
There is no easy workaround except for allowing NGINX Amplify Agent to run as `root`. Sometimes fixing permissions for */proc* and */sys/block* may work. + +### How Do I Verify that NGINX Amplify Agent Is Correctly Installed? + +1. On Ubuntu/Debian use: + + ```bash + dpkg -s nginx-amplify-agent + ``` + +2. On CentOS and Red Hat use: + + ```bash + yum info nginx-amplify-agent + ``` + +### How Can I Update NGINX Amplify Agent? + +1. On Ubuntu/Debian use: + + ```bash + apt-get update && \ + apt-get install nginx-amplify-agent + ``` + +2. On CentOS use: + + ```bash + yum makecache && \ + yum update nginx-amplify-agent + ``` + +### What System Resources are Required? + +Under 10% of the CPU and a few dozen MBs of RSS memory will be consumed. If you notice any anomalies in the system resource consumption, please submit a support request through https://my.f5.com/, please attach the debug log to the support case. + +### How Do I Restart NGINX Amplify Agent? + + ``` + # service amplify-agent restart + ``` + +### How Can I Uninstall NGINX Amplify Agent? + +Guide to [uninstall NGINX Amplify Agent]({{< relref "/amplify/nginx-amplify-agent/install/uninstalling-amplify-agent" >}}) + +### How Can I Override System Hostname? + +If NGINX Amplify Agent is not able to determine the system's hostname, you can define it manually in **/etc/amplify-agent/agent.conf** + +Find the following section, and fill in the desired hostname: + +```nginx +[credentials] +.. +hostname = myhostname1 +``` + +The hostname should be valid — the following aren't valid hostnames: + + * localhost + * localhost.localdomain + * localhost6.localdomain6 + * ip6-localhost + +### How Can I Override the User ID for NGINX Amplify Agent to Use? + +Refer to the [Configuring NGINX Amplify Agent]({{< relref "/amplify/nginx-amplify-agent/install/configuring-amplify-agent#overriding-the-effective-user-id" >}}) section in the documentation. + +### Can I Use NGINX Amplify Agent with Docker? 
+ +Please check the [following section](https://github.com/nginxinc/docker-nginx-amplify) of the NGINX Amplify Agent repository to find out more. Keep in mind that the support for a Docker environment is currently experimental. diff --git a/content/amplify/faq/user-interface.md b/content/amplify/faq/user-interface.md new file mode 100644 index 000000000..fc05e70b1 --- /dev/null +++ b/content/amplify/faq/user-interface.md @@ -0,0 +1,33 @@ +--- +title: NGINX Amplify User Interface +description: Questions about F5 NGINX Amplify's User Interface +weight: 30 +toc: true +tags: ["docs"] +docs: "DOCS-959" +--- + +### What Browsers are Supported? + +Currently, the following desktop browsers are officially supported: + + * Chrome + * Firefox + * Safari + * Opera + +### Is the Traffic to the Web Interface Secure? + +We only support SSL/TLS connections to the F5 NGINX Amplify web interface. + +### How Can I Delete a System or an NGINX Instance from NGINX Amplify? + +To completely delete a previously monitored object follow these steps: + + 1. Uninstall NGINX Amplify Agent + 2. Delete objects from the web interface + 3. Delete alarms + +To delete a system using the web interface — find it in the [Inventory]({{< relref "/amplify/user-interface/inventory" >}}), and select the [i] icon. You can delete objects from the popup window that appears next. + +{{< important >}}Deleting objects in the User Interface will not stop NGINX Amplify Agent. 
To completely remove a system from monitoring, please stop or uninstall NGINX Amplify Agent, clean it up in the web interface, and clean up any alerts.{{< /important >}} diff --git a/content/amplify/known-issues.md b/content/amplify/known-issues.md new file mode 100644 index 000000000..c91861925 --- /dev/null +++ b/content/amplify/known-issues.md @@ -0,0 +1,23 @@ +--- +title: Known Issues +weight: 1000 +description: "List of known issues in the latest release of F5 NGINX Amplify" +toc: true +tags: ["docs"] +--- + +{{}} + +--- + +### {{% icon-bug %}} Unable to add some NGINX HTTP requests metrics to custom graph {#631} + +{{}} +| Issue ID | Status | +|----------|--------| +| 631 | Open | +{{}} + +#### Description + +When trying to create a custom dashboard for the metrics nginx.http.request.reading and nginx.http.request.writing, the option to add them to the dashboard is disabled. diff --git a/content/amplify/metrics-metadata/_index.md b/content/amplify/metrics-metadata/_index.md new file mode 100644 index 000000000..267c548be --- /dev/null +++ b/content/amplify/metrics-metadata/_index.md @@ -0,0 +1,5 @@ +--- +title: Metrics and Metadata +weight: 700 +url: /nginx-amplify/metrics-metadata/ +--- diff --git a/content/amplify/metrics-metadata/metrics-overview.md b/content/amplify/metrics-metadata/metrics-overview.md new file mode 100644 index 000000000..c0aa38c7b --- /dev/null +++ b/content/amplify/metrics-metadata/metrics-overview.md @@ -0,0 +1,12 @@ +--- +title: Metrics and Metadata Overview +description: Learn about the metrics and metadata F5 NGINX Amplify collects. +weight: 10 +toc: true +tags: ["docs"] +docs: "DOCS-972" +--- + +Most metrics are collected by F5 NGINX Amplify Agent without requiring the user to perform any additional setup. For troubleshooting, see [Troubleshooting Metrics Collection]({{< relref "/amplify/nginx-amplify-agent/troubleshooting-metrics-collection.md" >}}). 
+ +Some additional metrics for NGINX monitoring will only be reported if the NGINX configuration file is modified accordingly. See [Additional NGINX Metrics]({{< relref "/amplify/metrics-metadata/nginx-metrics#additional-nginx-metrics" >}}), and review the *Source* and *Variable* fields in the metric descriptions that follow. \ No newline at end of file diff --git a/content/amplify/metrics-metadata/nginx-metrics.md b/content/amplify/metrics-metadata/nginx-metrics.md new file mode 100644 index 000000000..ece5a188c --- /dev/null +++ b/content/amplify/metrics-metadata/nginx-metrics.md @@ -0,0 +1,929 @@ +--- +title: NGINX Metrics +description: List of NGINX Metrics +weight: 30 +toc: true +tags: ["docs"] +docs: "DOCS-973" +--- + +## HTTP Connections and Requests + +- #### **nginx.http.conn.accepted** +- #### **nginx.http.conn.dropped** + + + ``` + Type: counter, integer + Description: NGINX-wide statistics describing HTTP connections. + Source: stub_status (or NGINX Plus status API) + ``` + + +- #### **nginx.http.conn.active** +- #### **nginx.http.conn.current** +- #### **nginx.http.conn.idle** + + + ``` + Type: gauge, integer + Description: NGINX-wide statistics describing HTTP connections. + Source: stub_status (or NGINX Plus status API) + ``` + + +- #### **nginx.http.request.count** + + + ``` + Type: counter, integer + Description: Total number of client requests. + Source: stub_status (or NGINX Plus status API) + ``` + + +- #### **nginx.http.request.current** +- #### **nginx.http.request.reading** +- #### **nginx.http.request.writing** + + + ``` + Type: gauge, integer + Description: Number of currently active requests (reading and writing). + Number of requests reading headers or writing responses to clients. + Source: stub_status (or NGINX Plus status API) + ``` + + +- #### **nginx.http.request.malformed** + + + ``` + Type: counter, integer + Description: Number of malformed requests. 
+ Source: access.log + ``` + + +- #### **nginx.http.request.body_bytes_sent** + + + ``` + Type: counter, integer + Description: Number of bytes sent to clients, not counting response headers. + Source: access.log + ``` + + +## HTTP Methods + +- #### **nginx.http.method.get** +- #### **nginx.http.method.head** +- #### **nginx.http.method.post** +- #### **nginx.http.method.put** +- #### **nginx.http.method.delete** +- #### **nginx.http.method.options** + + + ``` + Type: counter, integer + Description: Statistics about observed request methods. + Source: access.log + ``` + + +#### HTTP Status Codes + +- #### **nginx.http.status.1xx** +- #### **nginx.http.status.2xx** +- #### **nginx.http.status.3xx** +- #### **nginx.http.status.4xx** +- #### **nginx.http.status.5xx** + + + ``` + Type: counter, integer + Description: Number of requests with HTTP status codes per class. + Source: access.log + ``` + + +- #### **nginx.http.status.403** +- #### **nginx.http.status.404** +- #### **nginx.http.status.500** +- #### **nginx.http.status.502** +- #### **nginx.http.status.503** +- #### **nginx.http.status.504** + + + ``` + Type: counter, integer + Description: Number of requests with specific HTTP status codes above. + Source: access.log + ``` + + +- #### **nginx.http.status.discarded** + + + ``` + Type: counter, integer + Description: Number of requests finalized with status code 499 which is + logged when the client closes the connection. + Source: access.log + ``` + + +## HTTP Protocol Versions + +- #### **nginx.http.v0_9** +- #### **nginx.http.v1_0** +- #### **nginx.http.v1_1** +- #### **nginx.http.v2** + + + ``` + Type: counter, integer + Description: Number of requests using a specific version of the HTTP protocol. + Source: access.log + ``` + + +#### NGINX Process Metrics + +- #### **nginx.workers.count** + + + ``` + Type: gauge, integer + Description: Number of NGINX worker processes observed. 
+ ``` + + +- #### **nginx.workers.cpu.system** +- #### **nginx.workers.cpu.total** +- #### **nginx.workers.cpu.user** + + + ``` + Type: gauge, percent + Description: CPU utilization percentage observed for NGINX worker processes. + ``` + + +- #### **nginx.workers.fds_count** + + + ``` + Type: gauge, integer + Description: Number of file descriptors utilized by NGINX worker processes. + ``` + + +- #### **nginx.workers.io.kbs_r** +- #### **nginx.workers.io.kbs_w** + + + ``` + Type: counter, integer + Description: Number of kilobytes read from or written to disk by NGINX worker processes. + ``` + + +- #### **nginx.workers.mem.rss** +- #### **nginx.workers.mem.vms** + + + ``` + Type: gauge, bytes + Description: Memory utilized by NGINX worker processes. + ``` + + +- #### **nginx.workers.mem.rss_pct** + + + ``` + Type: gauge, percent + Description: Memory utilization percentage for NGINX worker processes. + ``` + + +- #### **nginx.workers.rlimit_nofile** + + + ``` + Type: gauge, integer + Description: Hard limit on the number of file descriptors as seen + by NGINX worker processes. + ``` + + +## Additional NGINX Metrics + +NGINX Amplify Agent can collect many useful metrics, as described below. To enable these metrics, make the following configuration changes. More predefined graphs will be added to the **Graphs** page if NGINX Amplify Agent finds additional metrics. With the required log format configuration, you can build more specific custom graphs. + +The [access.log](http://nginx.org/en/docs/http/ngx_http_log_module.html) log format should include an extended set of NGINX [variables](http://nginx.org/en/docs/varindex.html). Please add a new log format or modify the existing one — and use it with the `access_log` directives in your NGINX configuration. 
+ + ``` + log_format main_ext '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for" ' + '"$host" sn="$server_name" ' + 'rt=$request_time ' + 'ua="$upstream_addr" us="$upstream_status" ' + 'ut="$upstream_response_time" ul="$upstream_response_length" ' + 'cs=$upstream_cache_status' ; + ``` + +To use the extended log format with your access log configuration: + + ``` + access_log /var/log/nginx/access.log main_ext; + ``` + +{{< note >}} Please keep in mind that by default, NGINX Amplify Agent will process all access logs that are found in your log directory. If you define a new log file with the extended log format that will contain the entries being already logged to another access log, your metrics might be counted twice. Please refer to the NGINX Amplify Agent configuration section above to learn how to exclude specific log files from processing.{{< /note >}} + +The [error.log](http://nginx.org/en/docs/ngx_core_module.html#error_log) log level should be set to `warn`. + + ``` + error_log /var/log/nginx/error.log warn; + ``` + + **Note.** Don't forget to [reload](http://nginx.org/en/docs/control.html) your NGINX configuration with either `kill -HUP` or `service nginx reload`. + +List of additional metrics that can be collected from the NGINX log files: + +- #### **nginx.http.request.bytes_sent** + + + ``` + Type: counter, integer + Description: Number of bytes sent to clients. + Source: access.log (requires custom log format) + Variable: $bytes_sent + ``` + + +- #### **nginx.http.request.length** + + + ``` + Type: gauge, integer + Description: Request length, including request line, header, and body. 
+ Source: access.log (requires custom log format) + Variable: $request_length + ``` + + +- #### **nginx.http.request.time** +- #### **nginx.http.request.time.count** +- #### **nginx.http.request.time.max** +- #### **nginx.http.request.time.median** +- #### **nginx.http.request.time.pctl95** + + + ``` + Type: gauge, seconds.milliseconds + Description: Request processing time — time elapsed between reading the first bytes from + the client and writing a log entry after the last bytes were sent. + Source: access.log (requires custom log format) + Variable: $request_time + ``` + + +- #### **nginx.http.request.buffered** + + + ``` + Type: counter, integer + Description: Number of requests that were buffered to disk. + Source: error.log (requires 'warn' log level) + ``` + + +- #### **nginx.http.gzip.ratio** + + + ``` + Type: gauge, float + Description: Achieved compression ratio, calculated as the ratio between the original + and compressed response sizes. + Source: access.log (requires custom log format) + Variable: $gzip_ratio + ``` + + +### Upstream Metrics + +- #### **nginx.upstream.connect.time** +- #### **nginx.upstream.connect.time.count** +- #### **nginx.upstream.connect.time.max** +- #### **nginx.upstream.connect.time.median** +- #### **nginx.upstream.connect.time.pctl95** + + + ``` + Type: gauge, seconds.milliseconds + Description: Time spent on establishing connections with upstream servers. With SSL, it + also includes time spent on the handshake. + Source: access.log (requires custom log format) + Variable: $upstream_connect_time + ``` + + +- #### **nginx.upstream.header.time** +- #### **nginx.upstream.header.time.count** +- #### **nginx.upstream.header.time.max** +- #### **nginx.upstream.header.time.median** +- #### **nginx.upstream.header.time.pctl95** + + + ``` + Type: gauge, seconds.milliseconds + Description: Time spent on receiving response headers from upstream servers. 
+ Source: access.log (requires custom log format) + Variable: $upstream_header_time + ``` + + +- #### **nginx.upstream.response.buffered** + + + ``` + Type: counter, integer + Description: Number of upstream responses buffered to disk. + Source: error.log (requires 'warn' log level) + ``` + + +- #### **nginx.upstream.request.count** +- #### **nginx.upstream.next.count** + + + ``` + Type: counter, integer + Description: Number of requests that were sent to upstream servers. + Source: access.log (requires custom log format) + Variable: $upstream_* + ``` + + +- #### **nginx.upstream.request.failed** +- #### **nginx.upstream.response.failed** + + + ``` + Type: counter, integer + Description: Number of requests and responses that failed while proxying. + Source: error.log (requires 'error' log level) + ``` + + +- #### **nginx.upstream.response.length** + + + ``` + Type: gauge, bytes + Description: Average length of the responses obtained from the upstream servers. + Source: access.log (requires custom log format) + Variable: $upstream_response_length + ``` + + +- #### **nginx.upstream.response.time** +- #### **nginx.upstream.response.time.count** +- #### **nginx.upstream.response.time.max** +- #### **nginx.upstream.response.time.median** +- #### **nginx.upstream.response.time.pctl95** + + + ``` + Type: gauge, seconds.milliseconds + Description: Time spent on receiving responses from upstream servers. + Source: access.log (requires custom log format) + Variable: $upstream_response_time + ``` + + +- #### **nginx.upstream.status.1xx** +- #### **nginx.upstream.status.2xx** +- #### **nginx.upstream.status.3xx** +- #### **nginx.upstream.status.4xx** +- #### **nginx.upstream.status.5xx** + + + ``` + Type: counter, integer + Description: Number of responses from upstream servers with specific HTTP status codes. 
+ Source: access.log (requires custom log format) + Variable: $upstream_status + ``` + + +### Cache Metrics + +- #### **nginx.cache.bypass** +- #### **nginx.cache.expired** +- #### **nginx.cache.hit** +- #### **nginx.cache.miss** +- #### **nginx.cache.revalidated** +- #### **nginx.cache.stale** +- #### **nginx.cache.updating** + + + ``` + Type: counter, integer + Description: Various statistics about NGINX cache usage. + Source: access.log (requires custom log format) + Variable: $upstream_cache_status + ``` + + +## NGINX Plus Metrics + +In [NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) several additional metrics describing various aspects of NGINX performance are available. The [API module](http://nginx.org/en/docs/http/ngx_http_api_module.html) in NGINX Plus is responsible for collecting and exposing all of the additional counters and gauges. + +The NGINX Plus metrics currently supported by NGINX Amplify Agent are described below. The NGINX Plus metrics have the "plus" prefix in their names. + +Some of the NGINX Plus metrics extracted from the `connections` and the `requests` datasets are used to generate the following server-wide metrics (instead of using the *stub_status* metrics): + +``` +nginx.http.conn.accepted = connections.accepted +nginx.http.conn.active = connections.active +nginx.http.conn.current = connections.active + connections.idle +nginx.http.conn.dropped = connections.dropped +nginx.http.conn.idle = connections.idle +nginx.http.request.count = requests.total +nginx.http.request.current = requests.current +``` + +The NGINX Plus metrics below are collected *per zone*. When configuring a graph using these metrics, please make sure to pick the correct server, upstream, or cache zone. A more granular peer-specific breakdown of the metrics below is currently not supported in NGINX Amplify. 
+ +{{< note >}}NGINX Amplify Agent does not support reporting the following metrics *per zone* but it can be used to display a cumulative sum of values from each zone.{{< /note >}} + +A cumulative metric set is also maintained internally by summing up the per-zone metrics. If you don't configure a specific zone when building graphs, this will result in an "all zones" visualization. E.g., for something like **plus.http.status.2xx** omitting zone will display the instance-wide sum of the successful requests across all zones. + +### Server Zone Metrics + +- #### **plus.http.request.count** +- #### **plus.http.response.count** + + + ``` + Type: counter, integer + Description: Number of client requests received, and responses sent to clients. + Source: NGINX Plus status API + ``` + + +- #### **plus.http.request.bytes_rcvd** +- #### **plus.http.request.bytes_sent** + + + ``` + Type: counter, bytes + Description: Number of bytes received from clients, and bytes sent to clients. + Source: NGINX Plus status API + ``` + + +- #### **plus.http.status.1xx** +- #### **plus.http.status.2xx** +- #### **plus.http.status.3xx** +- #### **plus.http.status.4xx** +- #### **plus.http.status.5xx** + + + ``` + Type: counter, integer + Description: Number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx. + Source: NGINX Plus status API + ``` + + +- #### **plus.http.status.discarded** + + + ``` + Type: counter, integer + Description: Number of requests completed without sending a response. + Source: NGINX Plus status API + ``` + + +- #### **plus.http.ssl.handshakes** + + + ``` + Type: counter, integer + Description: Total number of successful SSL handshakes. + Source: NGINX Plus status API + ``` + + +- #### **plus.http.ssl.failed** + + + ``` + Type: counter, integer + Description: Total number of failed SSL handshakes. 
+ Source: NGINX Plus status API
+ ```
+
+
+- #### **plus.http.ssl.reuses**
+
+
+ ```
+ Type: counter, integer
+ Description: Total number of session reuses during SSL handshake.
+ Source: NGINX Plus status API
+ ```
+
+
+### Upstream Zone Metrics
+
+- #### **plus.upstream.peer.count**
+
+
+ ```
+ Type: gauge, integer
+ Description: Current number of live ("up") upstream servers in an upstream group. If
+ graphed/monitored without specifying an upstream, it's the current
+ number of all live upstream servers in all upstream groups.
+ Source: NGINX Plus status API
+ ```
+
+
+- #### **plus.upstream.request.count**
+- #### **plus.upstream.response.count**
+
+
+ ```
+ Type: counter, integer
+ Description: Number of client requests forwarded to the upstream servers, and responses obtained.
+ Source: NGINX Plus status API
+ ```
+
+
+- #### **plus.upstream.conn.active**
+
+
+ ```
+ Type: gauge, integer
+ Description: Current number of active connections to the upstream servers.
+ Source: NGINX Plus status API
+ ```
+
+
+- #### **plus.upstream.conn.keepalive**
+
+
+ ```
+ Type: gauge, integer
+ Description: Current number of idle keepalive connections.
+ Source: NGINX Plus status API
+ ```
+
+
+- #### **plus.upstream.zombies**
+
+
+ ```
+ Type: gauge, integer
+ Description: Current number of servers removed from the group but still processing
+ active client requests.
+ Source: NGINX Plus status API
+ ```
+
+
+- #### **plus.upstream.bytes_rcvd**
+- #### **plus.upstream.bytes_sent**
+
+
+ ```
+ Type: counter, integer
+ Description: Number of bytes received from the upstream servers, and bytes sent.
+ Source: NGINX Plus status API
+ ```
+
+
+- #### **plus.upstream.status.1xx**
+- #### **plus.upstream.status.2xx**
+- #### **plus.upstream.status.3xx**
+- #### **plus.upstream.status.4xx**
+- #### **plus.upstream.status.5xx**
+
+
+ ```
+ Type: counter, integer
+ Description: Number of responses from the upstream servers with status codes 1xx, 2xx,
+ 3xx, 4xx, and 5xx. 
+ Source: NGINX Plus status API + ``` + + +- #### **plus.upstream.header.time** +- #### **plus.upstream.header.time.count** +- #### **plus.upstream.header.time.max** +- #### **plus.upstream.header.time.median** +- #### **plus.upstream.header.time.pctl95** + + + ``` + Type: gauge, seconds.milliseconds + Description: Average time to get the response header from the upstream servers. + Source: NGINX Plus status API + ``` + + +- #### **plus.upstream.response.time** +- #### **plus.upstream.response.time.count** +- #### **plus.upstream.response.time.max** +- #### **plus.upstream.response.time.median** +- #### **plus.upstream.response.time.pctl95** + + + ``` + Type: gauge, seconds.milliseconds + Description: Average time to get the full response from the upstream servers. + Source: NGINX Plus status API + ``` + + +- #### **plus.upstream.fails.count** +- #### **plus.upstream.unavail.count** + + + ``` + Type: counter, integer + Description: Number of unsuccessful attempts to communicate with upstream servers, and + how many times upstream servers became unavailable for client requests. + Source: NGINX Plus status API + ``` + + +- #### **plus.upstream.health.checks** +- #### **plus.upstream.health.fails** +- #### **plus.upstream.health.unhealthy** + + + ``` + Type: counter, integer + Description: Number of performed health check requests, failed health checks, and + how many times the upstream servers became unhealthy. + Source: NGINX Plus status API + ``` + + +- #### **plus.upstream.queue.size** + + + ``` + Type: gauge, integer + Description: Current number of queued requests. + Source: NGINX Plus status API + ``` + + +- #### **plus.upstream.queue.overflows** + + + ``` + Type: counter, integer + Description: Number of requests rejected due to queue overflows. 
+ Source: NGINX Plus status API + ``` + + +### Cache Zone Metrics + +- #### **plus.cache.bypass** +- #### **plus.cache.bypass.bytes** +- #### **plus.cache.expired** +- #### **plus.cache.expired.bytes** +- #### **plus.cache.hit** +- #### **plus.cache.hit.bytes** +- #### **plus.cache.miss** +- #### **plus.cache.miss.bytes** +- #### **plus.cache.revalidated** +- #### **plus.cache.revalidated.bytes** +- #### **plus.cache.size** +- #### **plus.cache.stale** +- #### **plus.cache.stale.bytes** +- #### **plus.cache.updating** +- #### **plus.cache.updating.bytes** + + + ``` + Type: counter, integer; counter, bytes + Description: Various statistics about NGINX Plus cache usage. + Source: NGINX Plus status API + ``` + + +### Stream Zone Metrics + +- #### **plus.stream.conn.active** + + + ``` + Type: gauge, integer + Description: Current number of client connections that are currently being processed. + Source: NGINX Plus status API + ``` + + +- #### **plus.stream.conn.accepted** + + + ``` + Type: counter, integer + Description: Total number of connections accepted from clients. + Source: NGINX Plus status API + ``` + + +- #### **plus.stream.status.2xx** +- #### **plus.stream.status.4xx** +- #### **plus.stream.status.5xx** + + + ``` + Type: counter, integer + Description: Number of sessions completed with status codes 2xx, 4xx, or 5xx. + Source: NGINX Plus status API + ``` + + +- #### **plus.stream.discarded** + + + ``` + Type: counter, integer + Description: Total number of connections completed without creating a session. + Source: NGINX Plus status API + ``` + + +- #### **plus.stream.bytes_rcvd** +- #### **plus.stream.bytes_sent** + + + ``` + Type: counter, integer + Description: Number of bytes received from clients, and bytes sent. + Source: NGINX Plus status API + ``` + + +- #### **plus.stream.upstream.peers** + + + ``` + Type: gauge, integer + Description: Current number of live ("up") upstream servers in an upstream group. 
+ Source: NGINX Plus status API + ``` + + +- #### **plus.stream.upstream.conn.active** + + + ``` + Type: gauge, integer + Description: Current number of connections. + Source: NGINX Plus status API + ``` + + +- #### **plus.stream.upstream.conn.count** + + + ``` + Type: counter, integer + Description: Total number of client connections forwarded to this server. + Source: NGINX Plus status API + ``` + + +- #### **plus.stream.upstream.conn.time** +- #### **plus.stream.upstream.conn.time.count** +- #### **plus.stream.upstream.conn.time.max** +- #### **plus.stream.upstream.conn.time.median** +- #### **plus.stream.upstream.conn.time.pctl95** + + + ``` + Type: timer, integer + Description: Average time to connect to an upstream server. + Source: NGINX Plus status API + ``` + + +- #### **plus.stream.upstream.conn.ttfb** + + + ``` + Type: timer, integer + Description: Average time to receive the first byte of data. + Source: NGINX Plus status API + ``` + + +- #### **plus.stream.upstream.response.time** + + + ``` + Type: timer, integer + Description: Average time to receive the last byte of data. + Source: NGINX Plus status API + ``` + + +- #### **plus.stream.upstream.bytes_sent** +- #### **plus.stream.upstream.bytes_rcvd** + + + ``` + Type: counter, integer + Description: Number of bytes received from upstream servers, and bytes sent. + Source: NGINX Plus status API + ``` + + +- #### **plus.stream.upstream.fails.count** +- #### **plus.stream.upstream.unavail.count** + + + ``` + Type: counter, integer + Description: Number of unsuccessful attempts to communicate with upstream servers, and + how many times upstream servers became unavailable for client requests. 
+ Source: NGINX Plus status API
+ ```
+
+
+- #### **plus.stream.upstream.health.checks**
+- #### **plus.stream.upstream.health.fails**
+- #### **plus.stream.upstream.health.unhealthy**
+
+
+ ```
+ Type: counter, integer
+ Description: Number of performed health check requests, failed health checks, and
+ how many times the upstream servers became unhealthy.
+ Source: NGINX Plus status API
+ ```
+
+
+- #### **plus.stream.upstream.zombies**
+
+
+ ```
+ Type: gauge, integer
+ Description: Current number of servers removed from the group but still
+ processing active client connections.
+ Source: NGINX Plus status API
+ ```
+
+
+### Slab Zone Metrics
+
+- #### **plus.slab.pages.used**
+
+
+ ```
+ Type: gauge, integer
+ Description: Current number of used memory pages.
+ Source: NGINX Plus status API
+ ```
+
+
+- #### **plus.slab.pages.free**
+
+
+ ```
+ Type: gauge, integer
+ Description: Current number of free memory pages.
+ Source: NGINX Plus status API
+ ```
+
+
+- #### **plus.slab.pages.total**
+
+
+ ```
+ Type: gauge, integer
+ Description: Sum of free and used memory pages above.
+ ```
+
+
+- #### **plus.slab.pages.pct_used**
+
+
+ ```
+ Type: gauge, percentage
+ Description: Percentage of used pages.
+ ```
diff --git a/content/amplify/metrics-metadata/os-metrics.md b/content/amplify/metrics-metadata/os-metrics.md
new file mode 100644
index 000000000..620b35411
--- /dev/null
+++ b/content/amplify/metrics-metadata/os-metrics.md
@@ -0,0 +1,196 @@
+---
+title: OS Metrics
+description: List of OS Metrics
+weight: 20
+toc: true
+tags: ["docs"]
+docs: "DOCS-974"
+---
+
+## System Metrics
+
+- #### **system.disk.free**
+- #### **system.disk.total**
+- #### **system.disk.used**
+
+
+ ```
+ Type: gauge, bytes
+ Description: System disk usage statistics.
+ ```
+
+
+- #### **system.disk.in_use**
+
+
+ ```
+ Type: gauge, percent
+ Description: System disk usage statistics, percentage. 
+ ``` + + +- #### **system.io.iops_r** +- #### **system.io.iops_w** + + + ``` + Type: counter, integer + Description: Number of reads or writes per sampling window. + ``` + + +- #### **system.io.kbs_r** +- #### **system.io.kbs_w** + + + ``` + Type: counter, kilobytes + Description: Number of kilobytes read or written. + ``` + + +- #### **system.io.wait_r** +- #### **system.io.wait_w** + + + ``` + Type: gauge, milliseconds + Description: Time spent reading from or writing to disk. + ``` + + +- #### **system.load.1** +- #### **system.load.5** +- #### **system.load.15** + + + ``` + Type: gauge, float + Description: Number of processes in the system run queue, averaged over the last 1, 5, + and 15 min. + ``` + + +- #### **system.mem.available** +- #### **system.mem.buffered** +- #### **system.mem.cached** +- #### **system.mem.free** +- #### **system.mem.shared** +- #### **system.mem.total** +- #### **system.mem.used** + + + ``` + Type: gauge, bytes + Description: Statistics about system memory usage. + ``` + + +- #### **system.mem.pct_used** + + + ``` + Type: gauge, percent + Description: Statistics about system memory usage, percentage. + ``` + + +- #### **system.net.bytes_rcvd** +- #### **system.net.bytes_sent** + + + ``` + Type: counter, bytes + Description: Network I/O statistics. Number of bytes received or sent, per network + interface. + ``` + + +- #### **system.net.drops_in.count** +- #### **system.net.drops_out.count** + + + ``` + Type: counter, integer + Description: Network I/O statistics. Total number of inbound or outbound packets + dropped, per network interface. + ``` + + +- #### **system.net.packets_in.count** +- #### **system.net.packets_out.count** + + + ``` + Type: counter, integer + Description: Network I/O statistics. Number of packets received or sent, per network + interface. + ``` + + +- #### **system.net.packets_in.error** +- #### **system.net.packets_out.error** + + + ``` + Type: counter, integer + Description: Network I/O statistics. 
Total number of errors while receiving or sending, + per network interface. + ``` + + +- #### **system.net.listen_overflows** + + + ``` + Type: counter, integer + Description: Number of times the listen queue of a socket overflowed. + ``` + + +- #### **system.swap.free** +- #### **system.swap.total** +- #### **system.swap.used** + + + ``` + Type: gauge, bytes + Description: System swap memory statistics. + ``` + + +- #### **system.swap.pct_free** + + + ``` + Type: gauge, percent + Description: System swap memory statistics, percentage. + ``` + +## Agent Metrics + +{{< note >}} Agent metrics are available only if you are using F5 NGINX Amplify Agent.{{< /note >}} + +- #### **amplify.agent.status** + + ``` + Type: internal, integer + Description: 1 - agent is up, 0 - agent is down. + ``` +- #### **amplify.agent.cpu.system** +- #### **amplify.agent.cpu.user** + + ``` + Type: gauge, percent + Description: CPU utilization percentage observed from the NGINX Amplify Agent process. + ``` + + +- #### **amplify.agent.mem.rss** +- #### **amplify.agent.mem.vms** + + + ``` + Type: gauge, bytes + Description: Memory utilized by the NGINX Amplify Agent process. + ``` \ No newline at end of file diff --git a/content/amplify/metrics-metadata/other-metrics.md b/content/amplify/metrics-metadata/other-metrics.md new file mode 100644 index 000000000..62b7d7f2d --- /dev/null +++ b/content/amplify/metrics-metadata/other-metrics.md @@ -0,0 +1,443 @@ +--- +title: Other Metrics +description: Learn about other metrics used by F5 NGINX Amplify +weight: 40 +toc: true +tags: ["docs"] +docs: "DOCS-975" +--- + +{{< note >}}Monitoring PHP-FPM and MySQL metrics is only supported by F5 NGINX Amplify Agent.{{< /note >}} + +## PHP-FPM metrics + +You can also monitor your PHP-FPM applications with NGINX Amplify. NGINX Amplify Agent should run in the same process environment as PHP-FPM, and be able to find the php-fpm processes with *ps(1)*, otherwise, the PHP-FPM metric collection won't work. 
+ +When NGINX Amplify Agent finds a PHP-FPM master process, it tries to auto-detect the path to the PHP-FPM configuration. When the PHP-FPM configuration is found, NGINX Amplify Agent will look up the pool definitions and the corresponding `pm.status_path` directives. + +NGINX Amplify Agent will find all pools and status URIs currently configured. NGINX Amplify Agent then queries the PHP-FPM pool status(es) via FastCGI. There's no need to define HTTP proxy in your NGINX configuration that will point to the PHP-FPM status URIs. + +To start monitoring PHP-FPM, follow the steps below: + +1. Make sure that your PHP-FPM status is enabled for at least one pool — if it's not, uncomment the `pm.status_path` directive for the pool. For PHP7 on Ubuntu, look inside the **/etc/php/7.0/fpm/pool.d** directory to find the pool configuration files. After you've uncommented the `pm.status_path`, please make sure to restart PHP-FPM. + + ```bash + service php7.0-fpm restart + ``` + +2. {{< important >}} Check that NGINX, NGINX Amplify Agent, and the PHP-FPM workers are all run under the same user ID (e.g. `www-data`). You may have to change the used ID for the nginx workers, fix the nginx directories permissions, and then restart NGINX Amplify Agent too. If there are multiple PHP-FPM pools configured with different user IDs, make sure NGINX Amplify Agent's user ID is included in the group IDs of the PHP-FPM workers. This is required in order for NGINX Amplify Agent to access the PHP-FPM pool socket when querying for metrics.{{< /important >}} + +3. Confirm that the listen socket for the PHP-FPM pool you want to monitor and for which you enabled `pm.status_path`, is correctly configured with `listen.owner` and `listen.group`. Look for the following directives inside the pool configuration file. + + ``` + listen.owner = www-data + listen.group = www-data + listen.mode = 0660 + ``` + +4. Confirm that the PHP-FPM listen socket for the pool exists and has the right permissions. 
+
+   ```bash
+   ls -la /var/run/php/php7.0-fpm.sock
+   srw-rw---- 1 www-data www-data 0 May 18 14:02 /var/run/php/php7.0-fpm.sock
+   ```
+
+5. Confirm that you can query the PHP-FPM status for the pool from the command line:
+
+   ```
+   # SCRIPT_NAME=/status SCRIPT_FILENAME=/status QUERY_STRING= REQUEST_METHOD=GET cgi-fcgi -bind -connect /var/run/php/php7.0-fpm.sock
+   ```
+
+   Confirm that the command above returns a valid set of PHP-FPM metrics.
+
+   **Note.** The *cgi-fcgi* tool has to be installed separately, usually from the *libfcgi-dev* package. This tool is not required for NGINX Amplify Agent to collect and report PHP-FPM metrics, however it can be used to quickly diagnose possible issues with PHP-FPM metric collection.
+
+6. If your PHP-FPM is configured to use a TCP socket instead of a Unix domain socket, make sure you can query the PHP-FPM metrics manually with *cgi-fcgi*. Double check that your TCP socket configuration is secure (ideally, PHP-FPM pool listening on 127.0.0.1, and *listen.allowed_clients* enabled as well).
+
+7. [Update]({{< relref "/amplify/nginx-amplify-agent/install/updating-amplify-agent.md" >}}) NGINX Amplify Agent to the most recent version.
+
+8. Make sure that the following options are set in **/etc/amplify-agent/agent.conf**
+
+   ```
+   [extensions]
+   phpfpm = True
+   ```
+
+9. Restart NGINX Amplify Agent.
+
+   ```bash
+   service amplify-agent restart
+   ```
+
+NGINX Amplify Agent should be able to detect the PHP-FPM master and workers, obtain the access to status, and collect the necessary metrics.
+
+With all of the above successfully configured, the result should be an additional tab displayed on the [Graphs]({{< relref "/amplify/user-interface/graphs.md" >}}) page, with the pre-defined visualization of the PHP-FPM metrics.
+
+The PHP-FPM metrics on the [Graphs]({{< relref "/amplify/user-interface/graphs.md" >}}) page are cumulative, across all automatically detected pools. 
If you need per-pool graphs, go to [Dashboards]({{< relref "/amplify/user-interface/dashboards.md" >}}) and create custom graphs per pool.
+
+Here is the list of caveats to look for if the PHP-FPM metrics are not being collected:
+
+- No status enabled for any of the pools.
+- Different user IDs used by NGINX Amplify Agent and the PHP-FPM workers, or lack of a single group (when using PHP-FPM with a Unix domain socket).
+- Wrong permissions configured for the PHP-FPM listen socket (when using PHP-FPM with a Unix domain socket).
+- Agent can't connect to the TCP socket (when using PHP-FPM with a TCP socket).
+- Agent can't parse the PHP-FPM configuration. A possible workaround is to not have any ungrouped directives. Try to move any ungrouped directives under [global] and pool section headers.
+
+If checking the above issues didn't help, please enable NGINX Amplify Agent's [debug log]({{< relref "/amplify/nginx-amplify-agent/install/configuring-amplify-agent.md" >}}), restart NGINX Amplify Agent, wait a few minutes, and then submit a support request through https://my.f5.com/ and attach the debug log to the support case.
+
+Below is the list of supported PHP-FPM metrics.
+
+- #### **php.fpm.conn.accepted**
+
+
+ ```
+ Type: counter, integer
+ Description: The number of requests accepted by the pool.
+ Source: PHP-FPM status (accepted conn)
+ ```
+
+
+- #### **php.fpm.queue.current**
+
+
+ ```
+ Type: gauge, integer
+ Description: The number of requests in the queue of pending connections.
+ Source: PHP-FPM status (listen queue)
+ ```
+
+
+- #### **php.fpm.queue.max**
+
+
+ ```
+ Type: gauge, integer
+ Description: The maximum number of requests in the queue of pending connections since FPM has started.
+ Source: PHP-FPM status (max listen queue)
+ ```
+
+
+- #### **php.fpm.queue.len**
+
+
+ ```
+ Type: gauge, integer
+ Description: The size of the socket queue of pending connections. 
+ Source: PHP-FPM status (listen queue len) + ``` + + +- #### **php.fpm.proc.idle** + + + ``` + Type: gauge, integer + Description: The number of idle processes. + Source: PHP-FPM status (idle processes) + ``` + + +- #### **php.fpm.proc.active** + + + ``` + Type: gauge, integer + Description: The number of active processes. + Source: PHP-FPM status (active processes) + ``` + + +- #### **php.fpm.proc.total** + + + ``` + Type: gauge, integer + Description: The number of idle + active processes. + Source: PHP-FPM status (total processes) + ``` + + +- #### **php.fpm.proc.max_active** + + + ``` + Type: gauge, integer + Description: The maximum number of active processes since FPM has started. + Source: PHP-FPM status (max active processes) + ``` + + +- #### **php.fpm.proc.max_child** + + + ``` + Type: gauge, integer + Description: The number of times the process limit has been reached. + Source: PHP-FPM status (max children reached) + ``` + + +- #### **php.fpm.slow_req** + + + ``` + Type: counter, integer + Description: The number of requests that exceeded request_slowlog_timeout value. + Source: PHP-FPM status (slow requests) + ``` + + +## MySQL metrics + +Version 1.1.0 and above of NGINX Amplify Agent has a plugin for monitoring MySQL databases. Again, NGINX Amplify Agent should run in the same process environment as MySQL, and be able to find the mysqld processes with *ps(1)*. Otherwise, the MySQL metric collection won't work. + +NGINX Amplify Agent doesn't try to find and parse any existing MySQL configuration files. In order for NGINX Amplify Agent to connect to MySQL and collect the metrics, the following steps need to be performed. + +To start monitoring MySQL, follow the instructions below. + +1. Create a new user for NGINX Amplify Agent. + + ```bash + $ mysql -u root -p + [..] + mysql> CREATE USER 'amplify-agent'@'localhost' IDENTIFIED BY 'xxxxxx'; + Query OK, 0 rows affected (0.01 sec) + ``` + +2. Check that the user can read MySQL metrics.
+ + ```bash + $ mysql -u amplify-agent -p + .. + mysql> show global status; + +-----------------------------------------------+--------------------------------------------------+ + | Variable_name | Value | + +-----------------------------------------------+--------------------------------------------------+ + | Aborted_clients | 0 | + .. + | Uptime_since_flush_status | 1993 | + +-----------------------------------------------+--------------------------------------------------+ + 353 rows in set (0.01 sec) + ``` + + {{< note >}} NGINX Amplify Agent doesn't use *mysql(1)* for metric collection, however it implements a similar query mechanism via a Python module.{{< /note >}} + +3. [Update]({{< relref "/amplify/nginx-amplify-agent/install/updating-amplify-agent.md" >}}) NGINX Amplify Agent to the most recent version. + +4. Add the following to **/etc/amplify-agent/agent.conf** + + ``` + [extensions] + .. + mysql = True + + [mysql] + #host = + #port = + unix_socket = /var/run/mysqld/mysqld.sock + user = amplify-agent + password = xxxxxx + ``` + + where the password option mirrors the password from step 1 above. + +5. Restart NGINX Amplify Agent. + + ``` + # service amplify-agent restart + ``` + +With the above configuration steps NGINX Amplify Agent should be able to detect the MySQL master, obtain the access to status, and collect the necessary metrics. The end result should be an additional tab displayed on the [Graphs]({{< relref "/amplify/user-interface/graphs.md" >}}) page, with the pre-defined visualization of the key MySQL metrics. + +If the above didn't work, please enable NGINX Amplify Agent's [debug log]({{< relref "/amplify/nginx-amplify-agent/install/configuring-amplify-agent.md#agent-logfile" >}}), restart NGINX Amplify Agent, wait a few minutes, and then create a support request through https://my.f5.com/. Please attach the debug log to the support case.
+ +NGINX Amplify Agent retrieves most of the metrics from the MySQL global [status variables](https://dev.mysql.com/doc/refman/5.7/en/server-status-variables.html). + +Below is the list of supported MySQL metrics. + +- #### **mysql.global.connections** + + + ``` + Type: counter, integer + Description: The number of connection attempts (successful or not) to the MySQL server. + Source: SHOW GLOBAL STATUS LIKE "Connections"; + ``` + + +- #### **mysql.global.questions** + + + ``` + Type: counter, integer + Description: The number of statements executed by the server. See MySQL reference manual for details. + Source: SHOW GLOBAL STATUS LIKE "Questions"; + ``` + + +- #### **mysql.global.select** + + + ``` + Type: counter, integer + Description: The number of times a select statement has been executed. + Source: SHOW GLOBAL STATUS LIKE "Com_select"; + ``` + + +- #### **mysql.global.insert** + + + ``` + Type: counter, integer + Description: The number of times an insert statement has been executed. + Source: SHOW GLOBAL STATUS LIKE "Com_insert"; + ``` + + +- #### **mysql.global.update** + + + ``` + Type: counter, integer + Description: The number of times an update statement has been executed. + Source: SHOW GLOBAL STATUS LIKE "Com_update"; + ``` + + +- #### **mysql.global.delete** + + + ``` + Type: counter, integer + Description: The number of times a delete statement has been executed. + Source: SHOW GLOBAL STATUS LIKE "Com_delete"; + ``` + + +- #### **mysql.global.writes** + + + ``` + Type: counter, integer + Description: Sum of insert, update, and delete counters above. + ``` + + +- #### **mysql.global.commit** + + + ``` + Type: counter, integer + Description: The number of times a commit statement has been executed. + Source: SHOW GLOBAL STATUS LIKE "Com_commit"; + ``` + + +- #### **mysql.global.slow_queries** + + + ``` + Type: counter, integer + Description: The number of queries that have taken more than long_query_time seconds. 
+ Source: SHOW GLOBAL STATUS LIKE "Slow_queries"; + ``` + + +- #### **mysql.global.uptime** + + + ``` + Type: counter, integer + Description: The number of seconds that the server has been up. + Source: SHOW GLOBAL STATUS LIKE "Uptime"; + ``` + + +- #### **mysql.global.aborted_connects** + + + ``` + Type: counter, integer + Description: The number of failed attempts to connect to the MySQL server. + Source: SHOW GLOBAL STATUS LIKE "Aborted_connects"; + ``` + + +- #### **mysql.global.innodb_buffer_pool_read_requests** + + + ``` + Type: counter, integer + Description: The number of logical read requests. + Source: SHOW GLOBAL STATUS LIKE "Innodb_buffer_pool_read_requests"; + ``` + + +- #### **mysql.global.innodb_buffer_pool_reads** + + + ``` + Type: counter, integer + Description: The number of logical reads that InnoDB could not satisfy from the buffer + pool, and had to read directly from disk. + Source: SHOW GLOBAL STATUS LIKE "Innodb_buffer_pool_reads"; + ``` + + +- #### **mysql.global.innodb_buffer_pool.hit_ratio** + + + ``` + Type: gauge, percentage + Description: Hit ratio reflecting the efficiency of the InnoDB buffer pool. + ``` + + +- #### **mysql.global.innodb_buffer_pool_pages_total** + + + ``` + Type: gauge, integer + Description: The total size of the InnoDB buffer pool, in pages. + Source: SHOW GLOBAL STATUS LIKE "Innodb_buffer_pool_pages_total"; + ``` + + +- #### **mysql.global.innodb_buffer_pool_pages_free** + + + ``` + Type: gauge, integer + Description: The number of free pages in the InnoDB buffer pool. + Source: SHOW GLOBAL STATUS LIKE "Innodb_buffer_pool_pages_free"; + ``` + + +- #### **mysql.global.innodb_buffer_pool_util** + + + ``` + Type: gauge, percentage + Description: InnoDB buffer pool utilization. + ``` + + +- #### **mysql.global.threads_connected** + + + ``` + Type: gauge, integer + Description: The number of currently open connections. 
+ Source: SHOW GLOBAL STATUS LIKE "Threads_connected"; + ``` + + +- #### **mysql.global.threads_running** + + + ``` + Type: gauge, integer + Description: The number of threads that are not sleeping. + Source: SHOW GLOBAL STATUS LIKE "Threads_running"; + ``` diff --git a/content/amplify/nginx-amplify-agent/_index.md b/content/amplify/nginx-amplify-agent/_index.md new file mode 100644 index 000000000..e038bdb98 --- /dev/null +++ b/content/amplify/nginx-amplify-agent/_index.md @@ -0,0 +1,5 @@ +--- +title: Using NGINX Amplify Agent +weight: 300 +url: /nginx-amplify/nginx-amplify-agent/ +--- diff --git a/content/amplify/nginx-amplify-agent/amplify-agent-overview.md b/content/amplify/nginx-amplify-agent/amplify-agent-overview.md new file mode 100644 index 000000000..a42891ce6 --- /dev/null +++ b/content/amplify/nginx-amplify-agent/amplify-agent-overview.md @@ -0,0 +1,27 @@ +--- +title: NGINX Amplify Agent Overview +description: Learn about F5 NGINX Amplify Agent. +weight: 1 +toc: true +tags: ["docs"] +docs: "DOCS-960" +--- + +F5 NGINX Amplify Agent is a compact application written in Python. Its role is to collect various metrics and metadata and send them securely to the backend for storage and visualization. + +You need to install NGINX Amplify Agent on all hosts you want to monitor. + +Once you install NGINX Amplify Agent, it will automatically begin sending metrics. You can expect to see real-time metrics in the NGINX Amplify web interface within about a minute. + +NGINX Amplify can currently monitor and collect performance metrics for: + + 1. Operating system (see the list of supported OS [here]({{< relref "/amplify/faq/nginx-amplify-agent#what-operating-systems-are-supported" >}})) + 2. NGINX and NGINX Plus + 3. [PHP-FPM]({{< relref "/amplify/metrics-metadata/other-metrics.md#php-fpm-metrics" >}}) + 4.
[MySQL]({{< relref "/amplify/metrics-metadata/other-metrics.md#mysql-metrics" >}}) + +The NGINX Amplify Agent identifies an NGINX instance as any running NGINX master process with either a unique binary path or a unique configuration. + +{{< note >}}There's no need to manually add or configure anything in the web interface after installing NGINX Amplify Agent. When NGINX Amplify Agent is started, the metrics and the metadata are automatically reported to the Amplify backend and visualized in the web interface.{{< /note >}} + +When an NGINX instance is no longer in use it must be manually deleted in the web interface. The "Remove object" button can be found in the metadata viewer popup — see the [User Interface]({{< relref "/amplify/user-interface/">}}) documentation. \ No newline at end of file diff --git a/content/amplify/nginx-amplify-agent/configuration-analysis.md b/content/amplify/nginx-amplify-agent/configuration-analysis.md new file mode 100644 index 000000000..424058f55 --- /dev/null +++ b/content/amplify/nginx-amplify-agent/configuration-analysis.md @@ -0,0 +1,15 @@ +--- +title: NGINX Configuration Analysis +description: Learn about F5 NGINX Amplify Agent's configuration analysis feature. +weight: 600 +toc: true +tags: ["docs"] +docs: "DOCS-961" +--- + +F5 NGINX Amplify Agent can automatically find all relevant NGINX configuration files, parse them, extract their logical structure, and send the associated JSON data to the Amplify backend for further analysis and reporting. For more information on configuration analysis, please see the [Analyzer]({{< relref "/amplify/user-interface/analyzer.md" >}}) documentation. + +After NGINX Amplify Agent finds a particular NGINX configuration, it then automatically starts to keep track of its changes. When a change is detected with NGINX — e.g., a master process restarts, or the NGINX config is edited, an update is sent to the Amplify backend.
+ +{{< note >}} NGINX Amplify Agent never sends the raw unprocessed config files to the backend system. In addition, the following directives in the NGINX configuration are never analyzed — and their parameters aren't exported to the SaaS backend: +[ssl_certificate_key](http://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_certificate_key), [ssl_client_certificate](http://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_client_certificate), [ssl_password_file](http://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_password_file), [ssl_stapling_file](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_stapling_file), [ssl_trusted_certificate](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_trusted_certificate), [auth_basic_user_file](http://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic_user_file), [secure_link_secret](http://nginx.org/en/docs/http/ngx_http_secure_link_module.html#secure_link_secret).{{< /note >}} diff --git a/content/amplify/nginx-amplify-agent/configuring-metric-collection.md b/content/amplify/nginx-amplify-agent/configuring-metric-collection.md new file mode 100644 index 000000000..168e82b8d --- /dev/null +++ b/content/amplify/nginx-amplify-agent/configuring-metric-collection.md @@ -0,0 +1,122 @@ +--- +title: Configuring NGINX for Metric Collection +description: Learn how to configure NGINX Instances to collect data. +weight: 400 +toc: true +tags: ["docs"] +docs: "DOCS-963" +--- + +To monitor an NGINX instance, F5 NGINX Amplify Agent must [find the relevant NGINX master process]({{< relref "/amplify/nginx-amplify-agent/detecting-monitoring-instances" >}}) and determine its key characteristics. + +## Metrics from stub_status + +You must define [stub_status](http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) in your NGINX configuration for key NGINX graphs to appear in the web interface. If `stub_status` is already enabled, NGINX Amplify Agent should be able to locate it automatically. 
+ +If you're using NGINX Plus, you must configure either the `stub_status` module or the NGINX Plus [API module](http://nginx.org/en/docs/http/ngx_http_api_module.html). + +Without `stub_status` or the NGINX Plus status API, NGINX Amplify Agent will NOT be able to collect key NGINX metrics required for further monitoring and analysis. + +Add the `stub_status` configuration as follows: + +```bash + +# cd /etc/nginx + +# grep -i include\.*conf nginx.conf + include /etc/nginx/conf.d/*.conf; + +# cat > conf.d/stub_status.conf +server { + listen 127.0.0.1:80; + server_name 127.0.0.1; + location /nginx_status { + stub_status on; + allow 127.0.0.1; + deny all; + } +} + + +# ls -la conf.d/stub_status.conf +-rw-r--r-- 1 root root 162 Nov 4 02:40 conf.d/stub_status.conf + +# nginx -t +nginx: the configuration file /etc/nginx/nginx.conf syntax is ok +nginx: configuration file /etc/nginx/nginx.conf test is successful + +# kill -HUP `cat /var/run/nginx.pid` +``` + +Test your nginx configuration after you've added the `stub_status` section above. Make sure there's no ambiguity with either [listen](http://nginx.org/en/docs/http/ngx_http_core_module.html#listen) or [server_name](http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) configuration. NGINX Amplify Agent should be able to identify the [stub_status](http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) URL and will default to use 127.0.0.1 if the configuration is incomplete. + +{{< note >}} If you use the `conf.d*` directory to keep common parts of your NGINX configuration that are then automatically included in the [server](http://nginx.org/en/docs/http/ngx_http_core_module.html#server) sections across your NGINX config, do not use the snippet above. Instead, you should configure [stub_status](http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) manually within an appropriate location or server block.
{{< /note >}} + +The above is an example `nginx_status` URI for [stub_status](http://nginx.org/en/docs/http/ngx_http_stub_status_module.html). NGINX Amplify Agent will determine the correct URI automatically upon parsing your NGINX configuration. Please make sure that the directory and the actual configuration file with `stub_status` are readable by NGINX Amplify Agent; otherwise, NGINX Amplify Agent won't be able to determine the `stub_status` URL correctly. If NGINX Amplify Agent fails to find `stub_status`, please refer to the workaround described [here]({{< relref "/amplify/nginx-amplify-agent/install/configuring-amplify-agent#configuring-the-url-for-stub_status-or-status-api" >}}). + +Please ensure the `stub_status` [ACL](http://nginx.org/en/docs/http/ngx_http_access_module.html) is correctly configured, especially if your system is IPv6-enabled. Test the reachability of `stub_status` metrics with `wget(1)` or `curl(1)`. When testing, use the exact URL matching your NGINX configuration. + +For more information about `stub_status`, please refer to the NGINX documentation [here](http://nginx.org/en/docs/http/ngx_http_stub_status_module.html). + +If everything is configured properly, you should see something along these lines when testing it with `curl(1)`: + +```bash +$ curl http://127.0.0.1/nginx_status +Active connections: 2 +server accepts handled requests + 344014 344014 661581 +Reading: 0 Writing: 1 Waiting: 1 +``` + +If the command doesn't produce the expected output, confirm where the requests to `/nginx_status` are being routed. In many cases other [server](http://nginx.org/en/docs/http/ngx_http_core_module.html#server) blocks can be why you can't access `stub_status`. 
+ +NGINX Amplify Agent uses data from `stub_status` to calculate metrics related to server-wide HTTP connections and requests as described below: + +```nginx +nginx.http.conn.accepted = stub_status.accepts +nginx.http.conn.active = stub_status.active - stub_status.waiting +nginx.http.conn.current = stub_status.active +nginx.http.conn.dropped = stub_status.accepts - stub_status.handled +nginx.http.conn.idle = stub_status.waiting +nginx.http.request.count = stub_status.requests +nginx.http.request.current = stub_status.reading + stub_status.writing +nginx.http.request.reading = stub_status.reading +nginx.http.request.writing = stub_status.writing +``` + +For NGINX Plus, NGINX Amplify Agent will automatically use similar metrics available from the status API. + +For more information about the metric list, please refer to [Metrics and Metadata]({{< relref "/amplify/metrics-metadata" >}}). + +## Metrics from access.log and error.log + +NGINX Amplify Agent will also collect more NGINX metrics from the [access.log](http://nginx.org/en/docs/http/ngx_http_log_module.html) and the [error.log](http://nginx.org/en/docs/ngx_core_module.html#error_log) files. To do that, NGINX Amplify Agent should be able to read the logs. Ensure that the `nginx` user or the user [defined in the NGINX config](http://nginx.org/en/docs/ngx_core_module.html#user) (such as `www-data`) can read the log files. Please also make sure that the log files are being written normally. + +You don't have to specifically point NGINX Amplify Agent to either the NGINX configuration or the NGINX log files — it should detect their location automatically. + +NGINX Amplify Agent will also try to detect the [log format](http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format) for a particular log to parse it properly and try to extract even more useful metrics, e.g., [$upstream_response_time](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#var_upstream_response_time). 
+ +{{< note >}}Several metrics outlined in [Metrics and Metadata]({{< relref "metrics-metadata" >}}) will only be available if the corresponding variables are included in a custom [access.log](http://nginx.org/en/docs/http/ngx_http_log_module.html) format used for logging requests. You can find a complete list of NGINX log variables [here](http://nginx.org/en/docs/varindex.html).{{< /note >}} + +## Using Syslog for Metric Collection + +If you configured NGINX Amplify Agent for syslog metric collection (see the [configuring syslog]({{< relref "/amplify/nginx-amplify-agent/install/configuring-amplify-agent#configuring-syslog" >}}) documentation), make sure to add the following settings to the NGINX configuration: + + 1. Check that you are using NGINX version 1.9.5 or newer (or NGINX Plus Release 8 or newer). + 2. Edit the NGINX configuration file and specify the syslog listener address as the first parameter to the [access.log](http://nginx.org/en/docs/http/ngx_http_log_module.html) directive. Include the `amplify` tag, and your preferred log format: + + ```nginx + access_log syslog:server=127.0.0.1:12000,tag=amplify,severity=info main_ext; + ``` + + (see also how to extend the NGINX log format to collect [additional metrics]({{< relref "/amplify/metrics-metadata/nginx-metrics#additional-nginx-metrics" >}})) + + 3. 
Reload NGINX: + + ```bash + # service nginx reload + ``` + + (see more [here](http://nginx.org/en/docs/control.html)) + +{{< note >}}To send the NGINX logs to both the existing logging facility and NGINX Amplify Agent, include a separate [access.log](http://nginx.org/en/docs/http/ngx_http_log_module.html) directive for each destination.{{< /note >}} diff --git a/content/amplify/nginx-amplify-agent/detecting-monitoring-instances.md b/content/amplify/nginx-amplify-agent/detecting-monitoring-instances.md new file mode 100644 index 000000000..fa35ddfc3 --- /dev/null +++ b/content/amplify/nginx-amplify-agent/detecting-monitoring-instances.md @@ -0,0 +1,20 @@ +--- +title: Detecting and Monitoring NGINX Instances +description: Learn how F5 NGINX Amplify Agent detects NGINX Instances. +weight: 300 +toc: true +tags: ["docs"] +docs: "DOCS-962" +--- + +F5 NGINX Amplify Agent is capable of detecting several types of NGINX instances: + + * Installed from a repository package + * Built and installed manually + +A separate instance of NGINX, as seen by NGINX Amplify Agent, would be the following: + + * A unique master process and its workers, started with an **absolute path** to a distinct NGINX binary + * A master process running with a default config path; or with a custom path set in the command-line parameters + +{{< note >}}NGINX Amplify Agent will try to detect and monitor all unique NGINX instances currently running on a host. Separate sets of metrics and metadata are collected for each unique NGINX instance. 
{{< /note >}} diff --git a/content/amplify/nginx-amplify-agent/install/_index.md b/content/amplify/nginx-amplify-agent/install/_index.md new file mode 100644 index 000000000..23af3e2ce --- /dev/null +++ b/content/amplify/nginx-amplify-agent/install/_index.md @@ -0,0 +1,5 @@ +--- +title: Install and Configure NGINX Amplify Agent +weight: 10 +url: /nginx-amplify/install/ +--- diff --git a/content/amplify/nginx-amplify-agent/install/configuring-amplify-agent.md b/content/amplify/nginx-amplify-agent/install/configuring-amplify-agent.md new file mode 100644 index 000000000..82d012fc4 --- /dev/null +++ b/content/amplify/nginx-amplify-agent/install/configuring-amplify-agent.md @@ -0,0 +1,172 @@ +--- +title: Configuring NGINX Amplify Agent +description: Learn how to configure F5 NGINX Amplify Agent. +weight: 300 +toc: true +tags: ["docs"] +docs: "DOCS-967" +--- + +F5 NGINX Amplify Agent keeps its configuration in `/etc/amplify-agent/agent.conf`. The NGINX Amplify Agent configuration is a text-based file. + +## Overriding the Effective User ID + +NGINX Amplify Agent will drop *root* privileges on startup. By default, it will then use the user ID of the user `nginx` to set its effective user ID. The package install procedure will add the `nginx` user automatically unless it's already found in the system. If the [user](http://nginx.org/en/docs/ngx_core_module.html#user) directive appears in the NGINX configuration, NGINX Amplify Agent will pick up the user specified in the NGINX config for its effective user ID (e.g. `www-data`). + +NGINX Amplify Agent and the running NGINX instances need to use the same user ID for NGINX Amplify Agent to collect all NGINX metrics properly. 
+ +If you would like to manually specify the user ID that NGINX Amplify Agent should use for its effective user ID, there's a specialized section in `/etc/amplify-agent/agent.conf` for that: + +```nginx +[nginx] +user = +configfile = /etc/nginx/nginx.conf +``` + +The first option allows to explicitly set the real user ID, which NGINX Amplify Agent should pick for its effective user ID. If the `user` directive has a non-empty parameter, the NGINX Amplify Agent startup script will use it to look up the real user ID. + +The second option explicitly tells NGINX Amplify Agent where to look for an NGINX configuration file suitable for detecting the real user ID, `/etc/nginx/nginx.conf` by default. + +## Changing the API Key + +When you install NGINX Amplify Agent for the first time using the procedure above, your API key is written to the `agent.conf` file automatically. If you ever need to change the API key, please edit the following section in `agent.conf` accordingly: + +```nginx +[credentials] +api_key = YOUR_API_KEY +``` + +## Changing the Hostname and UUID + +To create unique objects for monitoring, NGINX Amplify Agent must be able to extract a valid hostname from the system. The hostname is also utilized as one of the components for generating a unique identifier. Essentially, the hostname and the UUID unambiguously identify a particular instance of NGINX Amplify Agent to the Amplify backend. If the hostname or the UUID are changed, NGINX Amplify Agent and the backend will register a new object for monitoring. + +When first generated, the UUID is written to `agent.conf`. Typically this happens automatically when NGINX Amplify Agent starts and successfully detects the hostname for the first time. Normally you shouldn't change the UUID in `agent.conf`. + +NGINX Amplify Agent will try its best to determine the correct hostname. If it fails to determine the hostname, you can set the hostname manually in the `agent.conf` file. 
Check for the following section, and put the desired hostname in here: + +```nginx +[credentials] +.. +hostname = myhostname1 +``` + +NGINX Amplify Agent won't start unless a valid hostname is defined. The following *aren't* valid hostnames: + + * localhost + * localhost.localdomain + * localhost6.localdomain6 + * ip6-localhost + +{{< note >}} You can also use the above method to replace the system's hostname with an arbitrary alias. Remember that if you redefine the hostname for a live object, the existing object will be marked as failed in the web interface. Redefining the hostname in NGINX Amplify Agent's configuration creates a new UUID and a new system for monitoring. {{< /note >}} + +Alternatively, you can define an "alias" for the host in the UI (see the [Graphs]({{< relref "/amplify/user-interface/graphs" >}}) section). + +## Configuring the URL for stub_status or Status API + +When NGINX Amplify Agent finds a running NGINX instance, it automatically detects the [stub_status](http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) or the NGINX Plus [API module](http://nginx.org/en/docs/http/ngx_http_api_module.html) locations from the NGINX configuration. + +To override the *stub_status* URI/URL, use the `stub_status` configuration option. + +```nginx +[nginx] +.. +stub_status = http://127.0.0.1/nginx_status +``` + +To override the URI detection of the status API, use the `plus_status` option. + +```nginx +[nginx] +.. +plus_status = /status +``` + +{{< note >}} If only the URI part is specified with the options above, NGINX Amplify Agent will use `http://127.0.0.1` to construct the full URL to access either the *stub_status* or the NGINX Plus status API metrics. {{< /note >}} + +## Configuring the Path to the NGINX Configuration File + +NGINX Amplify Agent detects the NGINX configuration file *automatically*. You don't need to explicitly point NGINX Amplify Agent to the NGINX `conf` file. 
+ +If NGINX Amplify Agent cannot find the NGINX configuration, use the following option in `/etc/amplify-agent/agent.conf`: + +```nginx +[nginx] +configfile = /etc/nginx/nginx.conf +``` + +{{< note >}} It is better to avoid using this option and only add it as a workaround. We'd appreciate it if you took some time to fill out a support ticket in case you had to manually add the path to the NGINX config file. {{< /note >}} + +## Configuring Host Tags + +You can define arbitrary tags on a "per-host" basis. Tags can be configured in the UI (see the [Graphs]({{< relref "/amplify/user-interface/graphs" >}}) documentation), or set in the `/etc/amplify-agent/agent.conf` file: + +```nginx +[tags] +tags = foo,bar,foo:bar +``` + +You can use tags to build custom graphs, configure alerts, and filter the systems on the [Graphs]({{< relref "/amplify/user-interface/graphs" >}}) page. + +## Configuring Syslog + +NGINX Amplify Agent can collect the NGINX log files via `syslog`. This could be useful when you don't keep the NGINX logs on disk or when monitoring a container environment such as [Docker](https://github.com/nginxinc/docker-nginx-amplify) with NGINX Amplify. + +To configure NGINX Amplify Agent for syslog, add the following to the `/etc/amplify-agent/agent.conf` file: + +```nginx +[listeners] +keys = syslog-default + +[listener_syslog-default] +address = 127.0.0.1:12000 +``` + +Restart NGINX Amplify Agent to have it reload the configuration and start listening on the specified IP address and port: + +```bash +service amplify-agent restart +``` + +Make sure to [add the `syslog` settings]({{< relref "/amplify/nginx-amplify-agent/install/configuring-amplify-agent#configuring-syslog" >}}) to your NGINX configuration as well. + +## Excluding Certain NGINX Log Files + +By default, NGINX Amplify Agent will try to find and watch all the `access.log` files described in the NGINX configuration.
If there are multiple log files where the same request is logged, the metrics may get counted more than once. + +To exclude specific NGINX log files from the metric collection, add an exclusion to the `/etc/amplify-agent/agent.conf` as in the following example: + +```nginx +[nginx] +exclude_logs=/var/log/nginx/app1/*,access-app1-*.log,sender1-*.log +``` + +## Setting Up a Proxy + +If your system is in a DMZ environment without direct access to the Internet, the only way for NGINX Amplify Agent to report collected metrics to Amplify is through a proxy. + +NGINX Amplify Agent will use the usual environment variables common on Linux systems (e.g. `https_proxy` or `HTTP_PROXY`). However, you can also define an HTTPS proxy manually in `agent.conf` file, as in the following example: + +```nginx +[proxies] +https = https://10.20.30.40:3030 +.. +``` + +## Agent Logfile + +NGINX Amplify Agent maintains its log file in `/var/log/amplify-agent/agent.log` + +Upon installation, NGINX Amplify Agent's log rotation schedule is added to `/etc/logrotate.d/amplify-agent`. + +The default level of logging for NGINX Amplify Agent is `INFO`. If you ever need to debug NGINX Amplify Agent, change the level to `DEBUG` as described below. The log file size can grow fast when using the `DEBUG` level. After you change the log level, please [restart]({{< relref "/amplify/nginx-amplify-agent/install/installing-amplify-agent#starting-and-stopping-the-agent" >}}) NGINX Amplify Agent. + +```nginx +[logger_agent-default] +level = DEBUG +.. + +[handler_agent-default] +class = logging.handlers.WatchedFileHandler +level = DEBUG +.. 
+``` diff --git a/content/amplify/nginx-amplify-agent/install/installing-amplify-agent.md b/content/amplify/nginx-amplify-agent/install/installing-amplify-agent.md new file mode 100644 index 000000000..08a6bd29e --- /dev/null +++ b/content/amplify/nginx-amplify-agent/install/installing-amplify-agent.md @@ -0,0 +1,165 @@ +--- +title: Install NGINX Amplify Agent +description: Learn how to install F5 NGINX Amplify Agent. +weight: 100 +toc: true +tags: ["docs"] +docs: "DOCS-968" +--- + +To use F5 NGINX Amplify to monitor your infrastructure, you need to install NGINX Amplify Agent on each system you wish to monitor. + +{{< note >}} NGINX Amplify Agent will drop *root* privileges on startup. It will then use the user ID of the user `nginx` to set its effective user ID. The package install procedure will add the `nginx` user automatically unless it's already found in the system. If the [user](http://nginx.org/en/docs/ngx_core_module.html#user) directive appears in the NGINX configuration, NGINX Amplify Agent will pick up the user specified in the NGINX config for its effective user ID (e.g. `www-data`). {{< /note >}} + +## Using the Install Script + +Take the following steps to install NGINX Amplify Agent: + +1. Download and run the install script. + + ```bash + curl -sS -L -O \ + https://github.com/nginxinc/nginx-amplify-agent/raw/master/packages/install.sh && \ + API_KEY='YOUR_API_KEY' sh ./install.sh + ``` + + Where YOUR_API_KEY is a unique API key assigned to your Amplify account. You will see the API key when adding a new system in the Amplify web interface. You can also find it in the **Account** menu. + +2. Verify that NGINX Amplify Agent has started. + + ```bash + ps ax | grep -i 'amplify\-' + 2552 ? S 0:00 amplify-agent + ``` + +## Installing NGINX Amplify Agent Manually + +### Installing on Ubuntu or Debian + +1. Add the NGINX public key. 
+ + ```bash + curl -fs http://nginx.org/keys/nginx_signing.key | apt-key add - + ``` + + or + + ```bash + wget -q -O - \ + http://nginx.org/keys/nginx_signing.key | apt-key add - + ``` + +2. Configure the repository as follows. + + ```bash + codename=`lsb_release -cs` && \ + os=`lsb_release -is | tr '[:upper:]' '[:lower:]'` && \ + echo "deb http://packages.amplify.nginx.com/${os}/ ${codename} amplify-agent" > \ + /etc/apt/sources.list.d/nginx-amplify.list + ``` + +3. Verify the repository config file (Ubuntu 14.04 example follows). + + ```bash + cat /etc/apt/sources.list.d/nginx-amplify.list + deb http://packages.amplify.nginx.com/ubuntu/ trusty amplify-agent + ``` + +4. Update the package index files. + + ```bash + apt-get update + ``` + +5. Install and run NGINX Amplify Agent. + + ```bash + apt-get install nginx-amplify-agent + ``` + +### Installing on CentOS, Red Hat Linux, or Amazon Linux + +1. Add the NGINX public key. + + ```bash + curl -sS -L -O http://nginx.org/keys/nginx_signing.key && \ + rpm --import nginx_signing.key + ``` + + or + + ```bash + wget -q -O nginx_signing.key http://nginx.org/keys/nginx_signing.key && \ + rpm --import nginx_signing.key + ``` + +2. Create the repository config as follows (mind the correct release number). + + Use the first snippet below for CentOS and Red Hat Linux. The second one applies to Amazon Linux. + + ```bash + release="7" && \ + printf "[nginx-amplify]\nname=nginx amplify repo\nbaseurl=http://packages.amplify.nginx.com/centos/${release}/\$basearch\ngpgcheck=1\nenabled=1\n" > \ + /etc/yum.repos.d/nginx-amplify.repo + ``` + + ```bash + release="latest" && \ + printf "[nginx-amplify]\nname=nginx amplify repo\nbaseurl=http://packages.amplify.nginx.com/amzn/${release}/\$basearch\ngpgcheck=1\nenabled=1\n" > \ + /etc/yum.repos.d/nginx-amplify.repo + ``` + +3. Verify the repository config file (RHEL 7.1 example follows). 
+ + ```bash + cat /etc/yum.repos.d/nginx-amplify.repo + [nginx-amplify] + name=nginx repo + baseurl=http://packages.amplify.nginx.com/centos/7/$basearch + gpgcheck=1 + enabled=1 + ``` + +4. Update the package metadata. + + ```bash + yum makecache + ``` + +5. Install and run NGINX Amplify Agent. + + ```bash + yum install nginx-amplify-agent + ``` + +## Creating the Config File from a Template + +```bash +api_key="YOUR_API_KEY" && \ +sed "s/api_key.*$/api_key = ${api_key}/" \ +/etc/amplify-agent/agent.conf.default > \ +/etc/amplify-agent/agent.conf +``` + +API_KEY is a unique API key assigned to your Amplify account. You will see your API key when adding a new system using the Amplify web interface. You can also find the API key in the *Account* menu. + +## Starting and Stopping NGINX Amplify Agent + +```bash +service amplify-agent start +``` + +```bash +service amplify-agent stop +``` + +```bash +service amplify-agent restart +``` + +## Verifying that NGINX Amplify Agent Has Started + +```bash +ps ax | grep -i 'amplify\-' +2552 ? S 0:00 amplify-agent +``` diff --git a/content/amplify/nginx-amplify-agent/install/uninstalling-amplify-agent.md b/content/amplify/nginx-amplify-agent/install/uninstalling-amplify-agent.md new file mode 100644 index 000000000..a74142dc2 --- /dev/null +++ b/content/amplify/nginx-amplify-agent/install/uninstalling-amplify-agent.md @@ -0,0 +1,35 @@ +--- +title: Uninstall NGINX Amplify Agent +description: Learn how to uninstall F5 NGINX Amplify Agent. 
+weight: 400 +toc: true +tags: ["docs"] +docs: "DOCS-969" +--- + +To completely delete a previously monitored object, perform the following steps: + + +### Uninstall F5 NGINX Amplify Agent + +- On Ubuntu/Debian use: + + ```bash + apt-get remove nginx-amplify-agent + ``` + +- On CentOS and Red Hat use: + + ```bash + yum remove nginx-amplify-agent + ``` + +### Delete objects from the web interface + +To delete a system using the web interface — find it in the [Inventory]({{< relref "/amplify/user-interface/inventory" >}}), and click on the "Trash" icon. + +Deleting objects in the UI will not stop NGINX Amplify Agent. To completely remove a system from monitoring, stop and uninstall NGINX Amplify Agent first, then clean it up in the web interface. + +### Delete alerts + + Check the [Alerts]({{< relref "/amplify/user-interface/alerts" >}}) page and remove or mute the irrelevant rules. diff --git a/content/amplify/nginx-amplify-agent/install/updating-amplify-agent.md b/content/amplify/nginx-amplify-agent/install/updating-amplify-agent.md new file mode 100644 index 000000000..02e2eba92 --- /dev/null +++ b/content/amplify/nginx-amplify-agent/install/updating-amplify-agent.md @@ -0,0 +1,26 @@ +--- +title: Update NGINX Amplify Agent +description: Learn how to update F5 NGINX Amplify Agent. +weight: 200 +toc: true +tags: ["docs"] +docs: "DOCS-970" +--- + +{{< important >}} +It is *highly* recommended that you periodically check for updates and install the latest stable version of F5 NGINX Amplify Agent. +{{< /important >}} + + 1. Updating NGINX Amplify Agent On Ubuntu/Debian + + ```bash + apt-get update && \ + apt-get install nginx-amplify-agent + ``` + + 2. 
Updating NGINX Amplify Agent On CentOS/Red Hat + + ```bash + yum makecache && \ + yum update nginx-amplify-agent + ``` \ No newline at end of file diff --git a/content/amplify/nginx-amplify-agent/install/using-amplify-agent-docker.md b/content/amplify/nginx-amplify-agent/install/using-amplify-agent-docker.md new file mode 100644 index 000000000..d2030e8a0 --- /dev/null +++ b/content/amplify/nginx-amplify-agent/install/using-amplify-agent-docker.md @@ -0,0 +1,12 @@ +--- +title: Use NGINX Amplify Agent with Docker +description: Learn how to use F5 NGINX Amplify Agent with Docker. +weight: 500 +toc: true +tags: ["docs"] +docs: "DOCS-971" +--- + +You can use F5 NGINX Amplify Agent in a Docker environment. Although it's still work-in-progress, NGINX Amplify Agent can collect most of the metrics and send them over to the Amplify backend in either "standalone" or "aggregate" mode. The standalone mode of operation is the simplest one, with a separate "host" created for each Docker container. Alternatively, the metrics from NGINX Amplify Agents running in different containers can be aggregated on a "per-image" basis — this is the aggregate mode of deploying NGINX Amplify Agent with Docker. + +For more information, please refer to our [Amplify Dockerfile](https://github.com/nginxinc/docker-nginx-amplify) repository. diff --git a/content/amplify/nginx-amplify-agent/metadata-metrics-collection.md b/content/amplify/nginx-amplify-agent/metadata-metrics-collection.md new file mode 100644 index 000000000..cf3a88ac7 --- /dev/null +++ b/content/amplify/nginx-amplify-agent/metadata-metrics-collection.md @@ -0,0 +1,27 @@ +--- +title: Metadata and Metrics Collection +description: Learn how F5 NGINX Amplify Agent collects data. 
+weight: 200 +toc: true +tags: ["docs"] +docs: "DOCS-964" +--- + +F5 NGINX Amplify Agent collects the following types of data: + + * **NGINX metrics.** NGINX Amplify Agent collects a lot of NGINX related metrics from [stub_status](http://nginx.org/en/docs/http/ngx_http_stub_status_module.html), the NGINX Plus status API, the NGINX log files, and from the NGINX process state. + * **System metrics.** These are various key metrics describing the system, e.g., CPU usage, memory usage, network traffic, etc. + * **PHP-FPM metrics.** NGINX Amplify Agent can obtain metrics from the PHP-FPM pool status if it detects a running PHP-FPM main process. + * **MySQL metrics.** NGINX Amplify Agent can obtain metrics from the MySQL global status set of variables. + * **NGINX metadata.** This is what describes your NGINX instances, and it includes package data, build information, the path to the binary, build configuration options, etc. NGINX metadata also includes the NGINX configuration elements. + * **System metadata.** This is the basic information about the OS environment where NGINX Amplify Agent runs. This can be the hostname, uptime, OS flavor, and other data. + +NGINX Amplify Agent will mostly use Python's [psutil()](https://github.com/giampaolo/psutil) to collect the metrics, but occasionally it may also invoke certain system utilities like *ps(1)*. + +While NGINX Amplify Agent is running on the host, it collects metrics at regular 20 second intervals. Metrics then get downsampled and sent to the Amplify backend once a minute. + +Metadata is also reported every minute. Changes in the metadata can be examined through the Amplify web interface. + +NGINX config updates are reported only when a configuration change is detected. + +If NGINX Amplify Agent can't reach the Amplify backend to send the accumulated metrics, it will continue to collect metrics. Once the connectivity is re-established, it will send them over to Amplify. 
The maximum amount of data NGINX Amplify Agent can buffer is about 2 hours. diff --git a/content/amplify/nginx-amplify-agent/source-code.md b/content/amplify/nginx-amplify-agent/source-code.md new file mode 100644 index 000000000..8e4b5d22d --- /dev/null +++ b/content/amplify/nginx-amplify-agent/source-code.md @@ -0,0 +1,15 @@ +--- +title: NGINX Amplify Agent Source Code +description: Learn where to find F5 NGINX Amplify Agent's source code. +weight: 700 +toc: true +tags: ["docs"] +docs: "DOCS-965" +--- + +F5 NGINX Amplify Agent is an open source application. It is licensed under the [2-clause BSD license](https://github.com/nginxinc/nginx-amplify-agent/blob/master/LICENSE), and is available here: + + * Sources: https://github.com/nginxinc/nginx-amplify-agent + * Public package repository: http://packages.amplify.nginx.com + * Install script for Linux: https://github.com/nginxinc/nginx-amplify-agent/raw/master/packages/install.sh + * A script to install NGINX Amplify Agent when the package is not available: https://raw.githubusercontent.com/nginxinc/nginx-amplify-agent/master/packages/install-source.sh diff --git a/content/amplify/nginx-amplify-agent/troubleshooting-metrics-collection.md b/content/amplify/nginx-amplify-agent/troubleshooting-metrics-collection.md new file mode 100644 index 000000000..ff8635c34 --- /dev/null +++ b/content/amplify/nginx-amplify-agent/troubleshooting-metrics-collection.md @@ -0,0 +1,27 @@ +--- +title: Troubleshooting Metrics Collection +description: Learn what to check if F5 NGINX Amplify Agent isn't reporting metrics. +weight: 500 +toc: true +tags: ["docs"] +docs: "DOCS-966" +--- + +After you [install and start]({{< relref "/amplify/nginx-amplify-agent/install/installing-amplify-agent" >}}) F5 NGINX Amplify Agent, it should start reporting right away, pushing aggregated data to the Amplify backend at regular 1 minute intervals. It'll take about a minute for a new system to appear in the Amplify web interface. 
+ +If you don't see the new system or NGINX instance in the web interface, or (some) metrics aren't being collected, please review the following: + + 1. The NGINX Amplify Agent package has been successfully [installed]({{< relref "/amplify/nginx-amplify-agent/install/installing-amplify-agent" >}}), and no warnings were reported during the installation. + 2. The `amplify-agent` process is running and updating its [log file]({{< relref "/amplify/nginx-amplify-agent/install/configuring-amplify-agent#agent-logfile" >}}). + 3. NGINX Amplify Agent is running under the same user as your NGINX worker processes. + 4. The NGINX instance is started with an absolute path. NGINX Amplify Agent **can't** detect NGINX instances launched with a relative path (e.g. "./nginx"). + 5. The [user ID that is used by NGINX Amplify Agent and the NGINX instance ]({{< relref "/amplify/nginx-amplify-agent/install/configuring-amplify-agent#overriding-the-effective-user-id" >}}), can run `ps(1)` to see all system processes. If `ps(1)` is restricted for non-privileged users, NGINX Amplify Agent won't be able to find and properly detect the NGINX master process. + 6. The time is set correctly. If the time on the system where NGINX Amplify Agent runs is ahead or behind the world's clock, you won't be able to see the graphs. + 7. `stub_status` is [configured correctly]({{< relref "/amplify/nginx-amplify-agent/configuring-metric-collection" >}}), and the `stub_status module` is included in the NGINX build (this can be confirmed with `nginx -V`). + 8. NGINX [access.log](http://nginx.org/en/docs/http/ngx_http_log_module.html) and [error.log](http://nginx.org/en/docs/ngx_core_module.html#error_log) files are readable by the user `nginx` (or by the [user](http://nginx.org/en/docs/ngx_core_module.html#user) set in NGINX config). + 9. All NGINX configuration files are readable by NGINX Amplify Agent user ID (check owner, group, and permissions). + 10. 
Extra [configuration steps have been performed as required]({{< relref "/amplify/metrics-metadata/nginx-metrics#additional-nginx-metrics" >}}) for the additional metrics to be collected.
+ 11. The system DNS resolver is correctly configured, and *receiver.amplify.nginx.com* can be successfully resolved.
+ 12. Outbound TLS/SSL from the system to *receiver.amplify.nginx.com* is not restricted. This can be confirmed with `curl(1)`. [Configure a proxy server]({{< relref "/amplify/nginx-amplify-agent/install/configuring-amplify-agent#setting-up-a-proxy" >}}) for NGINX Amplify Agent if required.
+ 13. *selinux(8)*, *apparmor(7)* or [grsecurity](https://grsecurity.net) are not interfering with the metric collection. E.g., for *selinux(8)*, review **/etc/selinux/config**. Try `setenforce 0` temporarily and see if it improves the situation for certain metrics.
+ 14. Some VPS providers use hardened Linux kernels that may restrict non-root users from accessing */proc* and */sys*. Metrics describing system and NGINX disk I/O are usually affected. There is no easy workaround except for allowing NGINX Amplify Agent to run as `root`. Sometimes fixing permissions for */proc* and */sys/block* may work.
diff --git a/content/amplify/overview/_index.md b/content/amplify/overview/_index.md
new file mode 100644
index 000000000..bd6e393a5
--- /dev/null
+++ b/content/amplify/overview/_index.md
@@ -0,0 +1,5 @@
+---
+title: Introduction
+weight: 100
+url: /nginx-amplify/overview/
+---
diff --git a/content/amplify/overview/overview-main-components.md b/content/amplify/overview/overview-main-components.md
new file mode 100644
index 000000000..e0aeb6dbc
--- /dev/null
+++ b/content/amplify/overview/overview-main-components.md
@@ -0,0 +1,36 @@
+---
+title: Overview and Main Components
+description: Learn about F5 NGINX Amplify and its main components.
+weight: 10
+toc: true
+tags: ["docs"]
+docs: "DOCS-976"
+---
+
+## What Is F5 NGINX Amplify?
+ +[NGINX Amplify](https://amplify.nginx.com/signup/) offers in-depth monitoring for NGINX-based web applications. It simplifies the process of analyzing and resolving issues related to performance and scalability. + +With NGINX Amplify, you can: + +- Spot performance issues, server overloads, and possible DDoS attacks easily. +- Receive intelligent advice to boost NGINX performance. +- Get alerts about issues in your application infrastructure. +- Plan for your web application's performance and capacity needs. +- Monitor all systems that run NGINX. + +## Main Components + +Hosted on the AWS public cloud, NGINX Amplify consists of several key components: + +### Agent + +- **NGINX Amplify Agent**: This Python app runs on the systems you're monitoring. It communicates securely with the SaaS backend using SSL/TLS. NGINX Amplify Agent always initiates the traffic. + +### NGINX Amplify Web Interface + +- Compatible with all major web browsers, this interface is only accessible through TLS/SSL. + +### NGINX Amplify Backend (SaaS Implementation) + +- This core system component handles metrics collection, data storage, analytics, and the core API. It's implemented as a SaaS. diff --git a/content/amplify/user-interface/_index.md b/content/amplify/user-interface/_index.md new file mode 100644 index 000000000..5a932fe3a --- /dev/null +++ b/content/amplify/user-interface/_index.md @@ -0,0 +1,5 @@ +--- +title: User Interface +weight: 600 +url: /nginx-amplify/user-interface/ +--- diff --git a/content/amplify/user-interface/account-settings.md b/content/amplify/user-interface/account-settings.md new file mode 100644 index 000000000..9e2bc8e18 --- /dev/null +++ b/content/amplify/user-interface/account-settings.md @@ -0,0 +1,43 @@ +--- +title: Account Settings +description: Learn about the Account settings for the User Interface. 
+weight: 70
+toc: true
+tags: ["docs"]
+docs: "DOCS-978"
+---
+
+The **Account** option in the user menu at the top right corner of the user interface provides access to several user settings.
+
+### Account Information
+
+In the Account section, you can check the information you provided upon signing up and edit specific fields.
+
+### Limits
+
+You can also see the current limits such as "maximum number of agents", "maximum number of custom dashboards", etc.
+
+### User Roles
+
+In the **Users** section, you can review the list of the user logins that are associated with this particular account. If you are the admin user, you can also invite your team members to the account.
+
+Users can be assigned one of the three roles — Admin, User, or Read-Only. Admin users are allowed to use all the Amplify User Interface functions, add/remove users, and modify everything. The User role is almost unrestricted except for managing other users. Read-only users can't modify graphs or manage users — this role can be useful for your support team members.
+
+### Notifications
+
+In the **Notifications** section, you will find information about the emails currently registered with your account and whether they are verified or not. The alert notifications are only sent to verified emails.
+
+In addition to the email alert notifications, you can optionally configure the integration with your Slack team and workspace. Under the registered emails section, select the "Add to Slack" button to allow Amplify to send you certain notifications on Slack. You will have to log in and provide the necessary details about your team and what channels you'd like to add to Amplify notifications. Both direct messages and channels can be used for notifications. If configured successfully, Amplify can send alert information to Slack.
A few additional notifications are available — e.g., F5 NGINX Amplify Agent not finding a running NGINX instance, but also proactive messages about the issues with the SSL certs.
+
+
+{{< img src="amplify/amplify-notifications.png" alt="Notifications" >}}
+
+### Agent Settings
+
+The "Agent settings" section is where you enable or disable account-wide behavior for:
+
+ * NGINX configuration files analysis
+ * Periodic NGINX configuration syntax checking with "nginx -t"
+ * Analyzing SSL certs
+
+Per-system settings are accessible via the "Settings" icon that can be found for a particular NGINX on the [**Analyzer**]({{< relref "/amplify/user-interface/analyzer.md" >}}) page. Per-system settings override the global settings. If you prefer to monitor your NGINX configurations on all but some specific systems, you can uncheck the corresponding settings.
diff --git a/content/amplify/user-interface/alerts.md b/content/amplify/user-interface/alerts.md
new file mode 100644
index 000000000..42f2c2462
--- /dev/null
+++ b/content/amplify/user-interface/alerts.md
@@ -0,0 +1,29 @@
+---
+title: Alerts
+description: Learn about the Alerts page of the User Interface.
+weight: 60
+toc: false
+tags: ["docs"]
+docs: "DOCS-979"
+---
+
+The **Alerts** page describes the configuration of the alert rules used to notify you of any anomalies in the behavior of your systems.
+
+Alerts are based on setting a rule to monitor a particular metric. Alert rules allow the user to specify the metric, the trigger condition, the threshold, and the email for notifications.
+
+The way alert rules work is the following:
+
+ 1. Incoming metric updates are being continuously monitored against the set of rules.
+ 2. If there's a rule for a metric, the new metric update is checked against the threshold.
+ 3. If the threshold is met, an alert notification is generated, and the rule will continue to be monitored.
+ 4. 
If subsequent metric updates show that the metric no longer violates the threshold for the configured period, the alert is cleared.
+
+By default, there's no filtering by host. If a specific alert should only be raised for a particular system, you should specify the hostname(s) or tags when configuring the alert. Currently, metrics can't be aggregated across all systems; instead, any system will match a particular rule unless a host is specified.
+
+There's one special rule, which is about the **amplify.agent.status** metric. This metric reflects the state of F5 NGINX Amplify Agent (and hence, the state of the system as seen by Amplify). You can only configure a 2 minute interval and only 0 (zero) as the threshold for **amplify.agent.status**.
+
+You shouldn't see consecutive notifications about the same alert repeatedly. Instead, there will be digest information sent out *every 60 minutes*, describing which alerts were generated and which ones were cleared.
+
+{{< note >}} Gauges are *averaged* over the interval configured in the rule. Counters are *summed up*. Currently, this is not user configurable, and these are the only reduce functions available for configuring metric thresholds. {{< /note >}}
+
+{{< note >}} Emails are sent using [AWS SES](https://aws.amazon.com/ses/). Make sure your mail relay accepts their traffic. Also, make sure to verify the specified email and check the verification status in the Account menu. {{< /note >}}
diff --git a/content/amplify/user-interface/analyzer.md b/content/amplify/user-interface/analyzer.md
new file mode 100644
index 000000000..9958828bc
--- /dev/null
+++ b/content/amplify/user-interface/analyzer.md
@@ -0,0 +1,52 @@
+---
+title: Analyzer
+description: Learn about the Analyzer page of the User Interface.
+weight: 50
+toc: false
+tags: ["docs"]
+docs: "DOCS-980"
+---
+
+F5 NGINX Amplify Agent parses NGINX configuration files and transmits them to the backend component for further analysis. 
NGINX Amplify offers configuration recommendations to help improve the performance, reliability, and security of your applications. With well-thought-out and detailed recommendations, you’ll know exactly where the problem is, why it is a problem, and how to fix it. + +When you switch to the **Analyzer** page, select a particular system on the left to see the associated report. If there are no NGINX instances on a system, there will be no report. + +{{< img src="amplify/amplify-analyzer.png" alt="Analyzer User Interface page" >}} + +The following information is provided when a report is generated from an NGINX config structure: + + * Version information + * Branch, release date, and the latest version in the branch + * Overview + * Path to NGINX config files(s) + * Whether the parser failed or not, and the results of `nginx -t` + * Last-modified info + * 3rd party modules found + * Breakdown of the key configuration elements (servers, locations, upstreams) + * Breakdown of IPv4/IPv6 usage + * Security + * Any security advisories that apply to this version of NGINX + * Virtual servers + * Breakdown of the virtual host configuration (think "apachectl -S") + * SSL + * OpenSSL version information + * Breakdown of the number of SSL or HTTP/2 servers configured + * Information about the configured SSL certificates + * Warnings about common SSL configuration errors + * Static analysis + * Various suggestions about configuration structure + * Typical configuration issues highlighted + * Common advice about proxy configurations + * Suggestions about simplifying rewrites for certain use cases + * Key security measures (e.g., *stub_status* is unprotected) + * Typical errors in configuring locations, especially with *regex* + +To parse SSL certificate metadata, NGINX Amplify Agent uses standard OpenSSL(1) functions. SSL certificates are parsed and analyzed only when the corresponding [settings]({{< relref "/amplify/user-interface/account-settings" >}}) are turned on. 
SSL certificate analysis is *off* by default. + +Static analysis will only include information about specific issues with the NGINX configuration if those are found in your NGINX setup. + +In the future, the **Analyzer** page will also include *dynamic analysis*, effectively linking the observed NGINX behavior to its configuration — e.g., when it makes sense to increase or decrease certain parameters like [proxy_buffers](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffers), etc. + +{{< note >}} Config analysis is *on* by default. If you don't want your NGINX configuration to be checked, unset the corresponding setting in either Global, or Local (per-system) settings. See [**Settings**]({{< relref "/amplify/user-interface/account-settings" >}}). {{< /note >}} + +{{< img src="amplify/amplify-analyzer-settings.png" alt="Analyzer Settings" >}} \ No newline at end of file diff --git a/content/amplify/user-interface/dashboards.md b/content/amplify/user-interface/dashboards.md new file mode 100644 index 000000000..6ddeb5fc8 --- /dev/null +++ b/content/amplify/user-interface/dashboards.md @@ -0,0 +1,52 @@ +--- +title: Dashboards +description: Learn about the Dashboards page of the User Interface. +weight: 40 +toc: false +tags: ["docs"] +docs: "DOCS-981" +--- + +You can create your own dashboards populated with highly customizable graphs of NGINX and system-level metrics. + +Some of the use cases for a custom set of graphs are the following: + + * Checking NGINX performance for a particular application or microservice, e.g. based on the URI path + * Displaying metrics per virtual server + * Visualizing the performance of a group of NGINX servers — for example, front-end load balancers, or an NGINX edge caching layer + * Analyzing a detailed breakdown of HTTP status codes per application + +When building a custom graph, metrics can be summed or averaged across several NGINX servers. 
Using metric filters, it is also possible to create additional “metric dimensions” — for example, reporting the number of POST requests for a specific URI. + +To create a custom dashboard, select **CREATE DASHBOARD** on the **Dashboards** drop-down menu. You can choose to quickly build several graphs from a preset to populate your custom dashboard with useful visualizations, or you can create your graphs from scratch. + +To start with a graph set wizard, select **New Set**. + +{{< img src="amplify/amplify-custom-new-set.png" alt="Dashboards - New Set" >}} + +Select **New Graph** in the upper right corner to start adding graphs to the dashboard. + +When adding or editing a graph, the following dialog appears: + +{{< img src="amplify/amplify-custom-graph-filter.png" alt="Dashboards - New graph dialog" >}} + +To define a graph, perform these steps: + + 1. Enter the graph title. + 2. Pick one or more metrics. You can combine multiple metrics on the same graph using the "Add another metric" button. + 3. After the metric is selected, you can see the systems for which the metric has been observed. Select one or multiple systems here. You can also use tags to specify the systems. + 4. When aggregating across multiple systems, select either "Sum" or "Avg" as the aggregation function. + 5. Last but not least, the “filter” functionality is also available for NGINX metrics collected from the log files. If you select "Add metric filter", you can add multiple criteria to define specific "metric dimensions". In the example above, we are matching the NGINX upstream response time against the **/api/feed/reports** URI. You can also build other filters, e.g., displaying metric **nginx.http.status.2xx** for the responses with the status code 201. + 6. Select "Save" to add the graph to the dashboard. You can also edit the graph, move it around, resize it, stack the graphs on top of each other, etc. 
+ +{{< note >}} When using filters, all the "metric dimensions" aren't stored in the F5 NGINX Amplify backend by default. A particular filter starts to slice the metric according to the specification only after the graph is created. Hence, it can be a while before the "filtered" metric is displayed on the graph — the end result depends on how quickly the log files are being populated with the new entries, but typically you should see the first data points in under 5 minutes. {{< /note >}} + +Because NGINX Amplify is **not** a SaaS log analyzer, the additional slicing for "metric dimensions" is implemented inside NGINX Amplify Agent. NGINX Amplify Agent can parse the NGINX access logs on-the-fly and extract all the necessary metrics **without** sending the raw log entries elsewhere. Moreover, NGINX Amplify Agent understands custom log formats automatically, and will start looking for various newly defined "metric dimensions" following a particular [log_format](https://nginx.org/en/docs/http/ngx_http_log_module.html#log_format) specification. + +Essentially, NGINX Amplify Agent performs a combination of real-time log analytics and standard metrics collection (e.g., metrics from the *stub_status* module). NGINX Amplify Agent does only the **real-time log processing**, and always on the same host where it is running. + +Metric filters can be really powerful. By using the filters and creating additional "metric dimensions", it is possible to build highly granular and informative graphs. To enable NGINX Amplify Agent to slice the metrics you must add the corresponding log variables to the active NGINX log format. Please see the [Additional NGINX metrics]({{< relref "/amplify/metrics-metadata/nginx-metrics#additional-nginx-metrics" >}}) section below. + +Metric filters are available only for the metrics generated from the log files. For other metrics some additional modifiers can be set when editing a graph. 
E.g., for NGINX Plus it is possible to specify the status API zones to build more detailed visualizations. + +While editing the dashboard, you can also use additional features like "Clone" to streamline the workflow. diff --git a/content/amplify/user-interface/graphs.md b/content/amplify/user-interface/graphs.md new file mode 100644 index 000000000..1562565c9 --- /dev/null +++ b/content/amplify/user-interface/graphs.md @@ -0,0 +1,28 @@ +--- +title: Graphs +description: Learn about the Graphs page of the User Interface. +weight: 20 +toc: false +tags: ["docs"] +docs: "DOCS-982" +--- + + On the **Graphs** page, you can find a collection of predefined graphs. Here you can see an overview of the key metric stats, such as CPU, memory, and disk usage for all your systems. + +If you click on a system on the left, the graphs will change to reflect the metrics for the selected system. The graphs are further split into tabs such as "System", "NGINX" and so on. + +{{< img src="amplify/amplify-graphs.png" alt="Graphs section of the User Interface" >}} + +Some graphs have an additional selector. E.g., with "Disk Latency" or "Network Traffic" you can select what device or interface you're analyzing. + +Above the graphs, you will find the following: + + * Hostname or alias for the selected system + * System properties editor where you can set up an alias for the host and assign host tags + * List of tags assigned to the system + * Time range selector, which helps to display different time periods for the graphs + * Time zone selector + +You can also copy a predefined graph to a custom dashboard by focusing on the graph and clicking on the arrow in the top right corner. + +Check the [Metrics and Metadata]({{< relref "/amplify/metrics-metadata" >}}) documentation to learn more about the displayed metrics. 
diff --git a/content/amplify/user-interface/inventory.md b/content/amplify/user-interface/inventory.md new file mode 100644 index 000000000..5824bc056 --- /dev/null +++ b/content/amplify/user-interface/inventory.md @@ -0,0 +1,20 @@ +--- +title: Inventory +description: Learn about the Inventory page of the User Interface. +weight: 30 +toc: false +tags: ["docs"] +docs: "DOCS-983" +--- + +You can access the inventory by selecting the first icon on the top menu. The inventory gives an overview of the systems that are being monitored. When F5 NGINX Amplify Agent is running and reporting on a new system, it's listed in the system index on the left side of the user interface and in the **Inventory** section. + +{{< img src="amplify/amplify-inventory.png" alt="Inventory section of the User Interface" >}} + +The **Inventory** allows you to check the status of all systems at a glance. It also provides a quick overview of the key metrics. + +In the rightmost column of the **Inventory**, you will find the settings and the metadata viewer icons. Select the "Info" icon to access useful information about the OS and the monitored NGINX instances. If you need to remove an object from the monitoring, select the "Trash" icon. + +You can apply sorting, search, and filters to the **Inventory** to quickly find the system in question. You can search and filter by hostname, IP address, architecture, etc. You can use regular expressions with the search function. + +{{< note >}} When removing an object from monitoring, keep in mind that you also need to stop or uninstall NGINX Amplify Agent on the systems being removed; otherwise, the objects will reappear in the User Interface. 
Be sure to delete any system-specific alert rules too.{{< /note >}} \ No newline at end of file diff --git a/content/amplify/user-interface/overview.md b/content/amplify/user-interface/overview.md new file mode 100644 index 000000000..f4975cf00 --- /dev/null +++ b/content/amplify/user-interface/overview.md @@ -0,0 +1,66 @@ +--- +title: Overview +description: Learn about the Overview page of the User Interface. +weight: 10 +toc: true +tags: ["docs"] +docs: "DOCS-984" +--- + +The Overview page is designed to provide a quick summary of the state of your NGINX infrastructure. Here you can quickly check the total sum of HTTP 5xx errors over the past 24 hours and compare it to the previous 24 hours. + +Five key overlay graphs are displayed for the selected period. By switching over various periods, you can compare trends and see if anything abnormal shows up. + +The cumulative [metrics]({{< relref "/amplify/metrics-metadata" >}}) displayed on the **Overview** page are: + + * Total requests — sum of **nginx.http.request.count** + * HTTP 5xx errors — sum of **nginx.http.status.5xx** + * Request time (P95) — average of **nginx.http.request.time.pctl95** + * Traffic — sum of **system.net.bytes_sent** rate + * CPU Usage — average of **system.cpu.user** + +{{< note >}} By default the metrics above are calculated for all monitored hosts. You can configure specific tags in the **Overview** settings popup to display the metrics for a set of hosts (e.g. only the "production environment"). {{< /note >}} + +You may see zero numbers if some metrics are not being gathered, e.g., if the request time (P95) is 0.000s, please check that you have correctly configured NGINX log for [additional metric]({{< relref "/amplify/metrics-metadata" >}}) collection. + +{{< img src="amplify/amplify-overview.png" alt="Overview section of the User Interface" >}} + +## Application Health Score + +The upper left block displays a total score that reflects your web app performance. It's called Application Health Score (AHS). 
+ +The Application Health Score (AHS) is an Apdex-like numerical measure that can be used to estimate the quality of experience for your web application. + +AHS is a product of 3 derivative service level indicators (SLI) — percentage of successful requests, percentage of "timely" requests, and agent availability. The "timely" requests are those with the total observed average request time P95 either below the low threshold (100% satisfying) or between the low and high threshold (partially satisfying). + +A simplified formula for AHS is the following: + +AHS = (Successful Requests %) * (Timely Requests %) * (Agent Availability %) + +Each individual SLI in this formula can be turned on or off. By default, only the percentage of successful requests is on. + +There are T1 and T2 thresholds for the total observed average request time P95 that you can configure for AHS: + + * T1 is the low threshold for satisfying requests + * T2 is the high threshold for partially satisfying requests + +If the average request time (P95) for the selected period is below T1, this is considered a 100% satisfying state of requests. If the request time is above T1 and below T2, a "satisfaction ratio" is calculated accordingly. Requests above T2 are considered totally unsatisfying. E.g., with T1=0.2s and T2=1s, a request time greater than 1s would be considered unsatisfying, and the resulting score would be 0%. 
+ +The algorithm for calculating the AHS is: + +```python +successful_req_pct = (nginx.http.request.count - nginx.http.status.5xx) / nginx.http.request.count + +if (nginx.http.request.time.pctl95 < T1) + timely_req_pct = 1 +else + if (nginx.http.request.time.pctl95 < T2) + timely_req_pct = 1 - (nginx.http.request.time.pctl95 - T1) / (T2 - T1) + else + timely_req_pct = 0 + +m1 = successful_req_pct +m2 = timely_req_pct +m3 = agent_up_pct + +app_health_score = m1 * m2 * m3 diff --git a/content/controller/_index.md b/content/controller/_index.md new file mode 100644 index 000000000..27b122bf6 --- /dev/null +++ b/content/controller/_index.md @@ -0,0 +1,11 @@ +--- +description: "NGINX Controller provides application delivery and API + management for modern app teams." +title: F5 NGINX Controller +weight: 2100 +cascade: + logo: "NGINX-Controller-product-icon-RGB.svg" + type: "ctlr-eos" +url: /nginx-controller/ +--- + diff --git a/content/controller/admin-guides/_index.md b/content/controller/admin-guides/_index.md new file mode 100644 index 000000000..a1ae4ba2a --- /dev/null +++ b/content/controller/admin-guides/_index.md @@ -0,0 +1,10 @@ +--- +description: Learn how to install and manage NGINX Controller and NGINX Controller + Agent. +menu: + docs: + parent: NGINX Controller +title: Admin Guides +weight: 100 +url: /nginx-controller/admin-guides/ +--- diff --git a/content/controller/admin-guides/backup-restore/_index.md b/content/controller/admin-guides/backup-restore/_index.md new file mode 100644 index 000000000..ac42e8e9a --- /dev/null +++ b/content/controller/admin-guides/backup-restore/_index.md @@ -0,0 +1,11 @@ +--- +description: Learn how to back up and restore NGINX Controller. 
+menu: + docs: + parent: Admin Guides + title: Backup & Restore + weight: 50 +title: Back Up & Restore +weight: 300 +url: /nginx-controller/admin-guides/backup-restore/ +--- diff --git a/content/controller/admin-guides/backup-restore/backup-restore-cluster-config.md b/content/controller/admin-guides/backup-restore/backup-restore-cluster-config.md new file mode 100644 index 000000000..6edfb59cb --- /dev/null +++ b/content/controller/admin-guides/backup-restore/backup-restore-cluster-config.md @@ -0,0 +1,34 @@ +--- +description: Learn how to back up your F5 NGINX Controller cluster configuration and + encryption keys. +docs: DOCS-247 +doctypes: +- task +tags: +- docs +title: Back Up & Restore Cluster Config and Encryption Keys +toc: true +weight: 97 +--- + +## Overview + +After installing F5 NGINX Controller, you should back up the cluster config and encryption keys. You'll need these if you ever need to restore the NGINX config database on top of a new NGINX Controller installation. + +- To back up the NGINX Controller cluster configuration and encryption keys: + + ```bash + /opt/nginx-controller/helper.sh cluster-config save + ``` + + The file is saved to `/opt/nginx-controller/cluster-config.tgz`. + +- To restore the cluster's config and encryption keys, take the following steps: + + ```bash + /opt/nginx-controller/helper.sh cluster-config load + ``` + +{{< versions "3.12" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/backup-restore/backup-restore-embedded-config-db.md b/content/controller/admin-guides/backup-restore/backup-restore-embedded-config-db.md new file mode 100644 index 000000000..156cf2f21 --- /dev/null +++ b/content/controller/admin-guides/backup-restore/backup-restore-embedded-config-db.md @@ -0,0 +1,164 @@ +--- +description: Learn how to back up and restore the embedded F5 NGINX Controller config + database. 
+docs: DOCS-248 +doctypes: +- tutorial +tags: +- docs +title: Back Up & Restore an Embedded Config Database +toc: true +weight: 98 +--- + +## Overview + +Follow the steps in this guide to back up and restore an internal F5 NGINX Controller config database. Use this guide if you selected the option to use an embedded config database when you installed NGINX Controller. Embedded config means that NGINX Controller is using an internal database to store configuration data. + +## Automated Backups of Embedded Config Database + +NGINX Controller automatically takes a snapshot of the embedded config database every 60 minutes and saves the backups on the config DB volume. The backup file location varies depending on the volume chosen at setup: + +- **Local**: The backup files are located in `/opt/nginx-controller/postgres_data/` with the following naming scheme: `backup_.tar`. + +- **NFS**: The backup files are located in the path on the NFS server host that was specified during installation and have the following naming scheme: `backup_.tar`. + +These automated config backups do not include backups of metrics data, which must be backed up separately; refer to [Backup & Restore the Metrics Database]({{< relref "/controller/admin-guides/backup-restore/backup-restore-metrics-db.md" >}}) for those instructions. + +{{< tip >}} +As a best practice, we recommend that you make scheduled backups of the entire config DB volume and keep the backups off-site for safekeeping. +{{< /tip >}} + +  + +--- + +## Restore Embedded Config Database + +This section explains how to restore the embedded config database from the latest backup file or a specific, timestamped file. + +{{< important >}}If you restore the config database on top of a new installation of NGINX Controller, make sure to follow the steps to [restore your NGINX config and encryption keys]({{< relref "/controller/admin-guides/backup-restore/backup-restore-cluster-config.md" >}}) afterward. 
{{< /important >}} + +- To restore the embedded NGINX Controller config database **from the latest automated backup**, run the following command: + + ```bash + /opt/nginx-controller/helper.sh backup restore + ``` + +- To restore the embedded config database from **a specific backup file**: + + ```bash + /opt/nginx-controller/helper.sh backup restore + ``` + + - If you installed the embedded config database on a **local volume**, the backup files are located in `/opt/nginx-controller/postgres_data/`. + + - If you installed the embedded config database on an **NFS volume**, follow the steps in [(NFS) Copy Config Database Backup to Local Volume for Restoration]({{< relref "/controller/admin-guides/backup-restore/backup-restore-embedded-config-db.md#nfs-copy-config-database-backup-to-local-volume-for-restoration" >}}) to download the backup file to your local volume, and then use the `helper.sh` script to restore from it. + +  + +### (NFS) Copy Config Database Backup to Local Volume for Restoration + + + +To restore the embedded config database from a specific backup file, the file needs to be on your local volume. + +Take the following steps to copy an embedded config database backup file from an NFS volume to your local volume for restoration: + +1. Log on to the node where PostgreSQL is installed as a user with sudo privileges. + +1. Change to the `/opt/nginx-controller` directory: + + ``` bash + cd /opt/nginx-controller + ``` + +1. Create a local backup directory to copy the backup file to: + + ``` bash + mkdir local_backups + ``` + +1. Get the NFS volume details: + + ``` bash + mount | grep nfs + ``` + + The output looks similar to the following: + + ``` bash + : on type nfs4 (mount options...) + ``` + + For example: + + ``` bash + 192.0.2.1:/mnt/nfs_share/nfs_postgresql on /var/lib/kubelet/pods/1ce4e221-d6d6-434f-9e73-bc81c879530e/volumes/kubernetes.io~nfs/controller-postgres type nfs4 (mount options ...) + ``` + +1. 
Record the `:` details corresponding to the `nfs_postgresql` volume, namely the volume mounted on the Kubernetes `controller-postgres` container. + + For example: + + ``` bash + 192.0.2.1:/mnt/nfs_share/nfs_postgresql + ``` + +1. Create a parent directory to mount the NFS path to: + + ``` bash + sudo mkdir -p /mnt/local_pgdata + ``` + +1. Mount the NFS path: + + ``` bash + sudo mount : /mnt/local_pgdata + ``` + + For example: + + ``` bash + sudo mount 192.0.2.1:/mnt/nfs_share/nfs_postgresql /mnt/local_pgdata + ``` + +1. View the list of the available backup files. The files have the following naming scheme: `backup_.tar`. + + ```bash + ls /mnt/local_pgdata/ + ``` + +1. Copy the backup file from which you want to restore to the `local_backups/` directory: + + ``` bash + sudo cp /mnt/local_pgdata/backup_.tar local_backups/ + ``` + +1. Use the NGINX Controller `helper.sh` script to restore the backup file: + + ``` bash + /opt/nginx-controller/helper.sh backup restore local_backups/backup_.tar + ``` + +1. After the backup has been restored, you can unmount the NFS path and delete the backup file in the `local_backups` directory: + + ``` bash + sudo umount /mnt/local_pgdata + rm -i local_backups/backup_.tar + ``` + + + + +  + +--- + +## What's Next + +- [Backup & Restore the Metrics Database]({{< relref "/controller/admin-guides/backup-restore/backup-restore-metrics-db.md" >}}) + +{{< versions "3.12" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/backup-restore/backup-restore-external-config-db.md b/content/controller/admin-guides/backup-restore/backup-restore-external-config-db.md new file mode 100644 index 000000000..2c465c0b6 --- /dev/null +++ b/content/controller/admin-guides/backup-restore/backup-restore-external-config-db.md @@ -0,0 +1,122 @@ +--- +description: Learn how to back up and restore the external F5 NGINX Controller config + database. 
+docs: DOCS-249 +doctypes: +- tutorial +tags: +- docs +title: Back Up & Restore an External Config Database +toc: true +weight: 99 +--- + +## Overview + +Follow the steps in this guide to back up and restore an external F5 NGINX Controller config database. Use this guide if you selected the option to use an external PostgreSQL config database when you installed NGINX Controller. External config means that you set up NGINX Controller to store configuration data in your own Postgres database. + +## Before You Begin + +To back up and restore the external config database, you'll need the following: + +- Login credentials for your NGINX Controller PostgreSQL database +- A connection to your NGINX Controller PostgreSQL database +- [psql](https://www.postgresql.org/docs/9.5/app-psql.html) and [pg_dump](https://www.postgresql.org/docs/9.5/app-pgdump.html) installed on the server where you'll be performing the backup or restore + +### Set the PostgreSQL Environment Variables + +1. Log in to the NGINX Controller host using SSH. +2. Set the following environment variables using the credentials for your NGINX Controller PostgreSQL database: + + ``` bash + export PGHOST=<postgres-host> + export PGPORT=5432 + export PGUSER=<postgres-user> + export PGPASSWORD=<postgres-password> + ``` + + {{< note >}} +If you've configured PostgreSQL to use SSL, ensure that you've placed your certs in `~/.postgresql`. For more information, see [Client Certificates](https://www.postgresql.org/docs/9.5/libpq-ssl.html#LIBPQ-SSL-CLIENTCERT) in the PostgreSQL documentation. + {{< /note >}} + +  + +--- + +## Back Up External Config Database + +Take the following steps to back up the external NGINX Controller config database: + +1. Stop NGINX Controller: + + ``` bash + /opt/nginx-controller/helper.sh controller stop + ``` + +1. Run the following script to back up the NGINX Controller database. The backup files are saved in a directory that looks like `pgbackup_<timestamp>`. 
+ + ``` bash + DATE=$(date +"%Y%m%d%H%M") + mkdir ~/pgbackup_${DATE} + + for db in common data system vault; do + pg_dump -w -E utf8 ${db} -F c -f ~/pgbackup_${DATE}/${db}-${DATE}.backup + done + ``` + +1. Start NGINX Controller: + + ``` bash + /opt/nginx-controller/helper.sh controller start + ``` + + +  + +--- + +## Restore External Config Database + +{{< important >}}If you restore the config database on top of a new installation of NGINX Controller, make sure to follow the steps to [restore your NGINX config and encryption keys]({{< relref "/controller/admin-guides/backup-restore/backup-restore-cluster-config.md" >}}) afterward. {{< /important >}} + +To restore the external NGINX Controller config database: + +1. Stop NGINX Controller: + + ``` bash + /opt/nginx-controller/helper.sh controller stop + ``` + +1. Locate the backup directory and save the name as a local environment variable. The name of the backup directory follows the format `pgbackup_<timestamp>`. + + ``` bash + BACKUP_PATH=~/pgbackup_<timestamp> + ``` + +1. Run the restore script: + + ``` bash + for backup_file in "$BACKUP_PATH"/*.backup; do + db="$(basename "$backup_file" | cut -d '-' -f 1)" + pg_restore -c -C -d "$db" "$backup_file" + done + ``` + +1. 
Start NGINX Controller: + + ``` bash + /opt/nginx-controller/helper.sh controller start + ``` + + +  + +--- + +## What's Next + +- [Backup & Restore the Metrics Database]({{< relref "/controller/admin-guides/backup-restore/backup-restore-metrics-db.md" >}}) + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/backup-restore/backup-restore-metrics-db.md b/content/controller/admin-guides/backup-restore/backup-restore-metrics-db.md new file mode 100644 index 000000000..101d6c0ed --- /dev/null +++ b/content/controller/admin-guides/backup-restore/backup-restore-metrics-db.md @@ -0,0 +1,39 @@ +--- +description: Learn how to back up and restore the F5 NGINX Controller analytics database. +docs: DOCS-250 +doctypes: +- task +tags: +- docs +title: Back Up & Restore the Analytics Database +toc: true +weight: 100 +--- + +## Overview + +This guide explains how to back up and restore the F5 NGINX Controller analytics database. Backing up and restoring the analytics data lets you preserve the history of graphs. Backing up this information is optional. + +## Back Up the Analytics Database + +Make a backup copy of the metrics database following the steps for your volume type: + +- **Local**: Make a back up copy of the metrics data that's located in `/opt/nginx-controller/clickhouse_data` by default, or on the volume that you specified when installing NGINX Controller. + +- **NFS**: Make a backup copy of all of the data in the NFS path or make a copy of the ClickHouse binary data. Refer to the official ClickHouse documentation on [Data Backup](https://clickhouse.tech/docs/en/operations/backup/). + +- **EBS**: For AWS, refer to the [Amazon EBS snapshots](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSSnapshots.html) documentation to create a volume snapshot. 
+ +## Restore the Analytics Database + +Restore the backup copy of the metrics database following the steps for your volume type: + +- **Local**: Copy the data you backed up to `/opt/nginx-controller/clickhouse_data`. + +- **NFS**: Copy the ClickHouse binary data in the NFS path. Refer to the official ClickHouse documentation on [Data Backup](https://clickhouse.tech/docs/en/operations/backup/). + +- **EBS**: For AWS, refer to the [Amazon EBS snapshots](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSSnapshots.html) documentation to restore a volume snapshot. + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/config-agent/_index.md b/content/controller/admin-guides/config-agent/_index.md new file mode 100644 index 000000000..1edb9dc6e --- /dev/null +++ b/content/controller/admin-guides/config-agent/_index.md @@ -0,0 +1,12 @@ +--- +description: Learn how to configure and manage the interaction between the F5 NGINX Plus + data plane and NGINX Controller. +menu: + docs: + parent: Admin Guides + title: Configure NGINX Controller Agent + weight: 30 +title: Configure NGINX Controller Agent +weight: 200 +url: /nginx-controller/admin-guides/config-agent/ +--- diff --git a/content/controller/admin-guides/config-agent/about-controller-agent.md b/content/controller/admin-guides/config-agent/about-controller-agent.md new file mode 100644 index 000000000..53c26b7e8 --- /dev/null +++ b/content/controller/admin-guides/config-agent/about-controller-agent.md @@ -0,0 +1,85 @@ +--- +description: Learn about the NGINX Controller Agent. +docs: DOCS-508 +doctypes: +- concept +tags: +- docs +title: Get to Know the F5 NGINX Controller Agent +toc: true +weight: 100 +--- + +## Overview + +The F5 NGINX Controller Agent is a compact application written in Golang. 
NGINX Controller uses the Controller Agent to manage and monitor each NGINX Plus instance that the Agent is installed on. Once installed, the NGINX Controller Agent collects metrics and metadata and sends them securely to NGINX Controller for storage and visualization. + +## How NGINX Controller Agent Works + +You need to [install the NGINX Controller Agent]({{< relref "/controller/admin-guides/install/install-nginx-controller-agent.md" >}}) on all of the hosts you'd like to monitor. + +Once installed, the NGINX Controller Agent automatically starts to report metrics. You should see the real-time metrics data in the NGINX Controller user interface after about one minute. + +There's no need to manually add or configure anything in the NGINX Controller user interface after installing the Agent. When the Agent is started, the metrics and the metadata are automatically reported to NGINX Controller and are visualized in the user interface. You can, however, [configure the NGINX Controller Agent]({{< relref "/controller/admin-guides/config-agent/configure-the-agent.md" >}}) to customize how it collects and reports metrics. + +All communications between the NGINX Controller Agent and the backend are done securely over SSL/TLS. All traffic is always initiated by the NGINX Controller Agent. The backend system doesn't set up any connections back to the NGINX Controller Agent. + +## Detecting and Monitoring NGINX Instances + +The NGINX Controller Agent attempts to detect and monitor all unique NGINX process instances running on a host and collects a separate set of metrics and metadata for each. The Agent uses the following qualifications to identify unique NGINX instances: + +- A unique control process and its workers, started with an **absolute path** to a distinct NGINX binary. +- A control process running with a default config path, or with a custom path set in the command-line parameters. 
+ +{{< caution >}}You should not make manual changes to the `nginx.conf` file on NGINX Plus instances that are managed by NGINX Controller. Manually updating the `nginx.conf` file on managed instances may adversely affect system performance. In most cases, NGINX Controller will revert or overwrite manual updates made to `nginx.conf`.{{< /caution >}} + +
+ +## Supported Systems + +NGINX Controller, the NGINX Controller Agent, and the NGINX Controller Application Security Add-on support the following distributions and architectures. + +{{< see-also >}}Refer to the [NGINX Plus Technical Specifications](https://docs.nginx.com/nginx/technical-specs/) guide for the distributions that NGINX Plus supports.{{< /see-also >}} + +{{< bootstrap-table "table table-striped table-bordered" >}} + +|Distribution
and Version|NGINX Controller
(Control Plane)|Agent
(Data Plane)|ADC App. Sec.
(Data Plane)|APIM Adv. Sec.
(Data Plane)|Notes| +|--- |--- |--- |--- |--- |--- | +|Amazon Linux
2
(x86_64)| Not supported|v3.0+ |Not supported|Not supported| | +|Amazon Linux
2017.09+
(x86_64)| Not supported |v3.0+|Not supported |Not supported| | +|CentOS
6.5+
(x86_64)| Not supported |v3.0+| Not supported |Not supported| • CentOS 6.5 and later versions in the CentOS 6 family are partially supported.
• This distribution does not support AVRD.| +|CentOS
7.4+
(x86_64)|v3.0+|v3.0+ | v3.12+ |v3.19+| • CentOS 7.4 and later versions in the CentOS 7 family are supported.| +|Debian
8
(x86_64)| Not supported |v3.0–3.21|Not supported|Not supported|• This distribution does not support AVRD.| +|Debian
9
(x86_64)|v3.0+|v3.0–3.21 | v3.12+ |v3.19+ | | +|Debian
10
(x86_64)| Not supported |v3.17+ | v3.17+ |v3.19+| See the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/) for requirements for Debian 10. | +|Red Hat Enterprise Linux
6.5+| Not supported |v3.0+| Not supported | Not supported| • RHEL 6.5 and later versions in the RHEL 6 family are partially supported.| +|Red Hat Enterprise Linux
7.4+
(x86_64)|v3.5+|v3.5+ | v3.12+|v3.19+| • RHEL 7.4 and later versions in the RHEL 7 family are supported.
• SELinux may interfere with NGINX Controller installation and operation. If you do enable SELinux, it must use permissive mode. Use of enforcing mode is not supported. | +|Red Hat Enterprise Linux
8.0+
(x86_64)|v3.22+|v3.22+ | v3.22+| Not supported | • RHEL 8.0 and later versions in the RHEL 8 family are supported.
• SELinux may interfere with NGINX Controller installation and operation. If you do enable SELinux, it must use permissive mode. Use of enforcing mode is not supported. | +|Ubuntu
18.04 LTS
(x86_64)|v3.0+|v3.0+ |v3.13+|v3.19+| | +|Ubuntu
20.04 LTS
(x86_64)|v3.20+|v3.12+|v3.16.1+|v3.19+| | + +{{< /bootstrap-table >}} + + + + +#### Analytics, Visibility, and Reporting Daemon (AVRD) + +NGINX Controller v3.1 and later use an Analytics, Visibility, and Reporting daemon (AVRD) to aggregate and report app-centric metrics, which you can use to track and check the health of your apps. To learn more about these metrics, see the [NGINX Metrics Catalog]({{< relref "/controller/analytics/catalogs/metrics.md" >}}) topic. + +{{< see-also >}} +See the [NGINX Controller Technical Specifications]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md" >}}) for the complete list of system requirements for NGINX Controller and the NGINX Controller Agent. +{{< /see-also >}} + +## Supported Python Versions + +NGINX Controller and the NGINX Controller Agent versions 3.6 and earlier require Python 2.6 or 2.7. Python is not needed for NGINX Controller or the NGINX Controller Agent versions 3.7 and later. + +## What's Next + +- [Install the NGINX Controller Agent]({{< relref "/controller/admin-guides/install/install-nginx-controller-agent.md" >}}) +- [Customize how the NGINX Controller Agent collects metrics]({{< relref "/controller/admin-guides/config-agent/configure-the-agent.md" >}}) + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/config-agent/configure-metrics-collection.md b/content/controller/admin-guides/config-agent/configure-metrics-collection.md new file mode 100644 index 000000000..767cb5099 --- /dev/null +++ b/content/controller/admin-guides/config-agent/configure-metrics-collection.md @@ -0,0 +1,152 @@ +--- +description: Contains instructions for setting up the F5 NGINX Controller Agent to collect + metrics for NGINX Plus instances. 
+docs: DOCS-509 +doctypes: +- task +tags: +- docs +title: Set up Metrics Collection +toc: true +weight: 120 +--- + +## Before You Begin + +- Before you can set up metrics collection, you first need to [install and start the F5 NGINX Controller Agent]({{< relref "/controller/admin-guides/install/install-nginx-controller-agent.md" >}}), so that the Agent can start pushing aggregated data to NGINX Controller. + +## Objectives + +Follow the steps in this guide to configure how metrics are collected and monitored. + +## Configuring NGINX for Metrics Collection + +In order to monitor an NGINX Plus instance, the NGINX Controller Agent needs to find the relevant NGINX control process and determine its key characteristics. + +The Agent is able to automatically find all relevant NGINX configuration files, parse them, extract their logical structure, and send the associated JSON data to the Controller Server for further analysis and reporting. + +### SSL Certificate Parsing and Analysis + +To parse SSL certificate metadata, the NGINX Controller Agent uses standard `openssl(1)` functions. SSL certificates are parsed and analyzed only when the corresponding [settings]({{< relref "/controller/admin-guides/config-agent/configure-the-agent.md#default-agent-settings" >}}) are turned on. SSL certificate analysis is *on* by default. + +To enable or disable analyzing SSL certs: + +1. Open the NGINX Controller user interface and log in. +2. Select the NGINX Controller menu icon, then select **Platform**. +3. On the **Platform** menu, select **Agent**. +4. On the **Default agent settings** page, select or clear the **Analyze SSL certificates** box. + +### Metrics from `/api` + +NGINX Controller uses the `/api` location on the NGINX Plus instance to collect metrics. + +When you push a configuration to an NGINX Plus instance, NGINX Controller automatically enables the `/api` location for that instance. 
+ +{{< note >}} +The `/api` location settings that NGINX Controller creates will override any settings that you have previously defined. +{{< /note >}} + +If you use NGINX Controller solely to monitor your NGINX Plus instances, you may need to enable the `/api` location on your instances manually. +Refer to the [Configuring the API](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api) section of the NGINX Plus Admin Guide for instructions. + +For more information about the metrics list, refer to [Overview: Metrics and Metadata]({{< relref "/controller/analytics/metrics/overview-metrics-metadata.md" >}}). + +### Metrics from `access.log` and `error.log` + +The NGINX Controller Agent collects NGINX metrics from the [access.log](http://nginx.org/en/docs/http/ngx_http_log_module.html) and the [error.log](http://nginx.org/en/docs/ngx_core_module.html#error_log) by default. + +You don't have to specifically point the Agent to either the NGINX configuration or the NGINX log files. The Agent should detect their location automatically. However, **you do need to make sure that the Agent can read the log files**. + +To do so, verify that either the `nginx` user or the [user defined in the NGINX config](https://nginx.org/en/docs/ngx_core_module.html#user) -- such as `www-data` -- can read the log files. In addition, make sure that the log files are being written normally. + +The Agent will try to detect the [log format](https://nginx.org/en/docs/http/ngx_http_log_module.html#log_format) for a particular log, so that it can parse the log correctly and extract relevant metrics data. + +#### Enable Custom `access.log` Metrics + +Some metrics included in the [NGINX Metrics reference]({{< relref "/controller/analytics/catalogs/metrics.md" >}}) are not available unless the corresponding variables are included in a custom [access.log](https://nginx.org/en/docs/http/ngx_http_log_module.html) format in the NGINX config. 
+
+{{< see-also >}}
+
+- Read [Configuring Logging](https://docs.nginx.com/nginx/admin-guide/monitoring/logging/#setting-up-the-access-log) in the NGINX Admin Guide.
+- View the complete list of [NGINX log variables](https://nginx.org/en/docs/varindex.html).
+
+{{< /see-also >}}
+
+Take the steps in this section to enable the NGINX Controller Agent to collect metrics from custom `access.log` variables.
+
+1. Add a new [access.log](https://nginx.org/en/docs/http/ngx_http_log_module.html) format to the NGINX configuration (or modify an existing one).
+
+2. Add the desired [NGINX variables](https://nginx.org/en/docs/varindex.html) to the log format. For example:
+
+    ```nginx
+    log_format main_ext '$remote_addr - $remote_user [$time_local] "$request" '
+                        '$status $body_bytes_sent "$http_referer" '
+                        '"$http_user_agent" "$http_x_forwarded_for" '
+                        '"$host" sn="$server_name" '
+                        'rt=$request_time '
+                        'ua="$upstream_addr" us="$upstream_status" '
+                        'ut="$upstream_response_time" ul="$upstream_response_length" '
+                        'cs=$upstream_cache_status' ;
+    ```
+
+3. Use the extended log format in your access log configuration:
+
+    ```nginx
+    access_log /var/log/nginx/access.log main_ext;
+    ```
+
+    {{< note >}}
+By default, the Controller Agent processes all access logs that it finds in your log directory. If you define a new log file with the extended log format that contains entries that are already being logged to another access log, your metrics might be counted twice. Refer to the [Agent configuration]({{< relref "/controller/admin-guides/config-agent/configure-the-agent.md" >}}) guide to learn how to exclude specific log files from processing.
+    {{< /note >}}
+
+4. Set the [error.log](https://nginx.org/en/docs/ngx_core_module.html#error_log) log level to `warn`.
+
+    ```nginx
+    error_log /var/log/nginx/error.log warn;
+    ```
+
+5. [Reload](https://nginx.org/en/docs/control.html) your NGINX configuration:
+
+    ```bash
+    service nginx reload
+    ```
+
+When the Controller Agent discovers these metrics, the NGINX Controller **Analytics Dashboards Overview** will automatically update with a predefined set of graphs.
+You can also use these metrics to build a more specific set of [custom Dashboards]({{< relref "/controller/analytics/dashboards/custom-dashboards.md" >}}).
+
+### Collect Metrics from Syslog
+
+If you set up the Controller Agent to [use Syslog]({{< relref "/controller/admin-guides/config-agent/configure-the-agent.md#logging-to-syslog" >}}), you need to set up the Controller Agent to collect metrics from Syslog.
+
+Take the steps below to enable metrics collection from Syslog:
+
+1. Edit the NGINX configuration file.
+
+    1. Specify the `syslog` listener address as the first parameter to the [access.log](https://nginx.org/en/docs/http/ngx_http_log_module.html) directive.
+    2. Include the `controller` tag and your preferred log format:
+
+    ```nginx
+    access_log syslog:server=127.0.0.1:12000,tag=controller,severity=info main_ext;
+    ```
+
+2. Reload NGINX:
+
+    ```bash
+    service nginx reload
+    ```
+
+    For more information, see [Controlling NGINX](https://nginx.org/en/docs/control.html).
+
+{{< note >}}
+To send the NGINX logs to both the existing logging facility and the NGINX Controller Agent, include a separate [access.log](https://nginx.org/en/docs/http/ngx_http_log_module.html) directive for each destination.
+{{< /note >}} + + +## What's Next + +- [Overview: NGINX Metrics and Metadata]({{< relref "/controller/analytics/metrics/overview-metrics-metadata.md" >}}) +- [What to check if the Controller Agent isn't reporting metrics]({{< relref "/controller/support/troubleshooting-controller.md#troubleshooting-metrics" >}}) + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/config-agent/configure-the-agent.md b/content/controller/admin-guides/config-agent/configure-the-agent.md new file mode 100644 index 000000000..23ee87785 --- /dev/null +++ b/content/controller/admin-guides/config-agent/configure-the-agent.md @@ -0,0 +1,217 @@ +--- +description: Customize the F5 NGINX Controller Agent configuration. +docs: DOCS-510 +doctypes: +- tutorial +tags: +- docs +title: Configure the NGINX Controller Agent +toc: true +weight: 110 +--- + +## Overview + +Follow the steps in this guide to customize the F5 NGINX Controller Agent configuration. + +## Default Agent Settings + +To access the **Default Agent Settings** page: + +1. Open the NGINX Controller user interface and log in. +2. Select the NGINX Controller menu icon, then select **Platform**. +3. On the **Platform** menu, select **Agent**. + +On the **Default Agent Settings** page, you can set the following default settings for the NGINX Controller Agent: + +- **NGINX configuration file analysis**. This setting is enabled by default. +- **Periodic NGINX configuration syntax checking with "nginx -t"**. This setting is disabled by default. +- **Analyzing SSL certs**. This setting is enabled by default. + +## Enable /api Location + +NGINX Controller uses the `/api` location on the NGINX Plus instance to collect metrics. + +When you push a configuration to an NGINX Plus instance, NGINX Controller automatically enables the `/api` location for that instance. 
+ +{{< note >}} +The `/api` location settings that NGINX Controller creates will override any settings that you have previously defined. +{{< /note >}} + +If you use NGINX Controller solely to monitor your NGINX Plus instances, you may need to enable the `/api` location on your instances manually. +Refer to the [Configuring the API](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api) section of the NGINX Plus Admin Guide for instructions. + +## Controller Agent Configuration File + +The configuration file for the NGINX Controller Agent is located at `/etc/controller-agent/agent.conf`. This configuration file is a text-based file. + +## Change the API Key + +When you first [install the NGINX Controller Agent]({{< relref "/controller/admin-guides/install/install-nginx-controller-agent.md" >}}), your API key is written to the `agent.conf` file automatically. If you ever need to change the API key, you can edit the following section in `agent.conf` accordingly: + +``` nginx +[credentials] +api_key = YOUR_API_KEY +``` + +## Change the Hostname and UUID + +To create unique objects for monitoring, the NGINX Controller Agent must be able to extract a valid hostname from the system. The hostname is also used as one of the components for generating a unique identifier. Essentially, the hostname and the UUID (universally unique identifier) unambiguously identify a particular instance of the NGINX Controller Agent to NGINX Controller. If the hostname or the UUID are changed, the NGINX Controller Agent and the server will register a new object for monitoring. + +The NGINX Controller Agent tries its best to determine the correct hostname. If the Agent cannot determine the hostname, you can set the hostname in the `agent.conf` file. Check for the following section, and provide the desired hostname here: + +``` nginx +[credentials] +.. +hostname = myhostname1 +``` + +The hostname should be real. 
The NGINX Controller Agent won't start unless a valid hostname is defined. The following *are not* valid hostnames: + +- localhost +- localhost.localdomain +- localhost6.localdomain6 +- ip6-localhost + +{{< note >}} + +You can use the above method to replace the system's hostname with an arbitrary alias. Keep in mind that if you redefine the hostname for a live object, the existing object will be marked as failed in the NGINX Controller user interface. Redefining the hostname in the NGINX Controller Agent's configuration creates a new UUID and a new system for monitoring. + +Alternatively, you can define an alias for the host in the NGINX Controller user interface. Go to the **Graphs** page, select the system that you want to update, and click the gear icon. + +{{< /note >}} + +## Preserving the UUID across OS upgrades + +The UUID is generated based on a combination of the hostname and underlying OS functions. An upgrade to the OS may lead to a new UUID and cause previously registered agents to be offline. + +If your use case requires that the UUID persist across upgrades, you can set the `store_uuid` option in `agent.conf`: + +``` nginx +[credentials] +... +store_uuid = True +``` + +After restarting the Controller Agent -- `service controller-agent restart` -- the UUID will be persisted to `agent.conf` and used for future instance detection. + +## Set the Path to the NGINX Configuration File + +The NGINX Controller Agent detects the NGINX configuration file automatically. You shouldn't need to point the NGINX Controller Agent to the `nginx.conf` file explicitly. + +{{< caution >}}You should not make manual changes to the `nginx.conf` file on NGINX Plus instances that are managed by NGINX Controller. Manually updating the `nginx.conf` file on managed instances may adversely affect system performance. 
In most cases, NGINX Controller will revert or overwrite manual updates made to `nginx.conf`.{{< /caution >}} + +If, for some reason, the NGINX Controller Agent cannot find the NGINX configuration, you can use the following option in `/etc/controller-agent/agent.conf` to point to the configuration file: + +``` nginx +[nginx] +configfile = /etc/nginx/nginx.conf +``` + +{{< note >}} We recommend using this option only as a workaround if needed. If you do need to add the path to the NGINX config file, we ask that you [contact NGINX Support]({{< relref "/controller/support/contact-support.md" >}}) so they can help troubleshoot the issue.{{< /note >}} + +## Set Host Tags + +You can define arbitrary tags on a "per-host" basis. Tags can be configured in the Controller user interface on the **Graphs** page, or set in the `/etc/controller-agent/agent.conf` file: + +``` nginx +[credentials] +tags = foo bar foo:bar +``` + +{{< note >}} Any changes to instance Tags made in the Controller user interface will overwrite the values stored in `agent.conf`.{{< /note >}} + +You can use tags to build custom graphs, configure alerts, and filter the systems on the **Graphs** page in the Controller user interface. + +## Logging to Syslog + +{{< see-also >}} +[NGINX Admin Guide - Logging to Syslog](https://docs.nginx.com/nginx/admin-guide/monitoring/logging/#logging-to-syslog) +{{< /see-also >}} + +The NGINX Controller Agent can collect NGINX log files using `syslog`. This could be useful when you don't keep the NGINX logs on disk, or when monitoring a container environment such as Docker with NGINX Controller. + +To configure the NGINX Controller Agent to send logs to `syslog`: + +1. Add the following to the `/etc/controller-agent/agent.conf` file: + + ``` nginx + [listeners] + keys = syslog-default + + [listener_syslog-default] + address = 127.0.0.1:12000 + ``` + +2. Restart the NGINX Controller Agent. 
This will reload the configuration, and the Agent will start listening on the specified IP address and port: + + ``` nginx + # service controller-agent restart + ``` + + {{< important >}} +Make sure you [add the `syslog` settings to your NGINX configuration file]({{< relref "/controller/admin-guides/config-agent/configure-metrics-collection.md#collect-metrics-from-syslog" >}}) as well. + {{< /important >}} + +## Exclude Certain NGINX Log Files + +By default, the NGINX Controller Agent tries to find and watch all `access.log` files described in the NGINX configuration. If there are multiple log files where the same request is logged, the metrics may be counted more than once. + +To exclude specific NGINX log files from the metrics collection, add lines similar to the following to `/etc/controller-agent/agent.conf`: + +``` nginx +[nginx] +exclude_logs=/var/log/nginx/app1/*,access-app1-*.log,sender1-*.log +``` + +## Set Up a Proxy + +If your system is in a DMZ environment without direct access to NGINX Controller, the only way for the NGINX Controller Agent to report collected metrics to NGINX Controller is through a proxy. + +The NGINX Controller Agent will use the usual environment variables common on Linux systems (for example, `https_proxy` or `HTTP_PROXY`). However, you can also define HTTPS proxy manually in `agent.conf`. This can be done as follows: + +``` nginx +[proxies] +https = https://10.20.30.40:3030 +.. +``` + +## Controller Agent Logfile + +The NGINX Controller Agent maintains its log file in `/var/log/nginx-controller/agent.log`. + +Upon installation, the NGINX Controller Agent's log rotation schedule is added to `/etc/logrotate.d/controller-agent`. + +The normal level of logging for the NGINX Controller Agent is `INFO`. If you ever need to debug the NGINX Controller Agent, change the level to `DEBUG` as described below. + +{{< caution >}} +The size of the NGINX Controller Agent's log file can proliferate in `DEBUG` mode. 
You should use `DEBUG` mode only for troubleshooting purposes. +{{< /caution >}} + +### Change the Agent Log Level + +To change the log level for the NGINX Controller Agent: + +1. Edit the `[loggers]` section of the NGINX Controller Agent configuration file -- `/etc/controller-agent/agent.conf`. +1. Set the `level` to one of the following: + + - error + - info + - debug + - trace + + ```plaintext + [loggers] + level = DEBUG + ... + ``` + +1. [Restart the NGINX Controller Agent]({{< relref "/controller/admin-guides/install/agent-restart.md#Starting-and-Stopping-the-Agent" >}}) to make the changes take effect. + +## What's Next + +- [Set up Metrics Collection]({{< relref "/controller/admin-guides/config-agent/configure-metrics-collection.md" >}}) + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/config-agent/use-agent-with-docker.md b/content/controller/admin-guides/config-agent/use-agent-with-docker.md new file mode 100644 index 000000000..d6debdf89 --- /dev/null +++ b/content/controller/admin-guides/config-agent/use-agent-with-docker.md @@ -0,0 +1,29 @@ +--- +description: Learn how to use the F5 NGINX Controller Agent in a Docker environment. +docs: DOCS-511 +doctypes: +- reference +tags: +- docs +title: Use the NGINX Controller Agent with Docker +toc: true +weight: 200 +--- + +## Before You Begin + +We support running the F5 NGINX Controller Agent in a Docker environment on the following distributions: CentOS, Debian, and Ubuntu. + +We **don't support** containerized instances on RHEL 7 and RHEL 8. + +For optimal performance when using the Controller Agent in a Docker environment, the number of containers shouldn't exceed the number of processors on the container host. 
+ +## Running NGINX Controller Agent in Docker + +When running a containerized instance on an Ubuntu or Debian docker host, you need to enable cgroup swap limit capabilities in order for the NGINX Controller Agent to be able to report swap metrics for instances. See [Docker - Linux post-installation steps](https://docs.docker.com/engine/install/linux-postinstall/#your-kernel-does-not-support-cgroup-swap-limit-capabilities) for details. + +Refer to the [nginxinc/docker-nginx-controller](https://github.com/nginxinc/docker-nginx-controller) repository in GitHub for a set of guidelines that you can use today as we continue to enhance the experience. + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/_index.md b/content/controller/admin-guides/install/_index.md new file mode 100644 index 000000000..0a0f35294 --- /dev/null +++ b/content/controller/admin-guides/install/_index.md @@ -0,0 +1,13 @@ +--- +description: Learn how to install and update F5 NGINX Controller and the NGINX Controller + Agent. +menu: + docs: + parent: Admin Guides + title: Installation + weight: 10 +title: Installation +weight: 100 +aliases: /admin-guides/installation/ +url: /nginx-controller/admin-guides/install/ +--- diff --git a/content/controller/admin-guides/install/agent-restart.md b/content/controller/admin-guides/install/agent-restart.md new file mode 100644 index 000000000..7e6190270 --- /dev/null +++ b/content/controller/admin-guides/install/agent-restart.md @@ -0,0 +1,47 @@ +--- +description: How to start, stop, and verify the state of the F5 NGINX Controller Agent + service. 
+docs: DOCS-251 +doctypes: +- task +tags: +- docs +title: Manage the NGINX Controller Agent Service +toc: true +weight: 210 +--- + +## Starting and Stopping the Agent + +To start, stop, and restart the F5 NGINX Controller Agent, run the following commands on the NGINX Plus system where you installed the Agent. + +Start the NGINX Controller Agent: + +```bash +service controller-agent start +``` + +Stop the NGINX Controller Agent: + +```bash +service controller-agent stop +``` + +Restart the NGINX Controller Agent: + +```bash +service controller-agent restart +``` + +## Verify that the Agent Has Started + +To verify that the NGINX Controller Agent has started, run the following command on the NGINX Plus system where you installed the Agent: + +```bash +ps ax | grep -i 'controller\-' +2552 ? S 0:00 controller-agent +``` + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/get-n-plus-cert-and-key.md b/content/controller/admin-guides/install/get-n-plus-cert-and-key.md new file mode 100644 index 000000000..2e8c1f977 --- /dev/null +++ b/content/controller/admin-guides/install/get-n-plus-cert-and-key.md @@ -0,0 +1,88 @@ +--- +description: How to download the F5 NGINX Plus nginx.crt and nginx.key files using the + NGINX Controller API. +docs: DOCS-252 +doctypes: +- task +tags: +- docs +title: Download the NGINX Plus Cert and Key Bundle +toc: true +weight: 105 +--- + +## Overview + +This topic explains how to use the [F5 NGINX Controller REST API](https://docs.nginx.com/nginx-controller/api/ctlr-platform-api/) to download your NGINX Plus `nginx.crt` and `nginx.key` files. You'll need these files if you're [installing NGINX Plus as part of an NGINX Controller trial]({{< relref "/controller/admin-guides/install/try-nginx-controller.md" >}}). 
+ +  + +## Authenticate with the NGINX Controller API + +The NGINX Controller API uses session cookies to authenticate requests. The session cookie is returned in response to a `GET /api/v1/platform/login` request. See the Login endpoint in the [NGINX Controller API Reference]({{< relref "/controller/api/_index.md" >}}) documentation for information about session cookie timeouts and invalidation. + +{{< tip >}} +You can send a GET request to the login endpoint to find the status of the session token. +{{< /tip >}} + +For example: + +- Login and capture the session cookie: + + ```curl + curl -c cookie.txt -X POST --url 'https:///api/v1/platform/login' --header 'Content-Type: application/json' --data '{"credentials": {"type": "BASIC","username": "","password": ""}}' + ``` + +- Use the session cookie to authenticate and get the session status: + + ```curl + curl -b cookie.txt -c cookie.txt -X GET --url 'https:///api/v1/platform/login' + ``` + + +  + +--- + +## Download the NGINX Plus Certificate and Key Bundle + +To use the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}) to download your NGINX Plus certificate and key bundle as a gzip or JSON file, send a GET request to the `/platform/licenses/nginx-plus-licenses/controller-provided` endpoint. + +For example: + +- Download JSON file: + + ```bash + curl -b cookie.txt -c cookie.txt --header 'Content-Type: application/json' -X GET --url 'https://192.0.2.0/api/v1/platform/licenses/nginx-plus-licenses/controller-provided' --output nginx-plus-certs.json + ``` + +- Download GZIP file: + + ```bash + curl -b cookie.txt -c cookie.txt -X GET --url 'https://192.0.2.0/api/v1/platform/licenses/nginx-plus-licenses/controller-provided' --output nginx-plus-certs.gz + ``` + +{{< note >}} +If you are using a self-signed certificate you will need to add `-k` (allow insecure connections) to your curl command to be able to download your NGINX Plus certificate and key bundle. 
+{{< /note >}} + + +Once you have downloaded your certificate and key bundle you will need to expand the `.gz` file to get your certificate and key pair. + +For example: + +```bash +gunzip nginx-plus-certs.gz +``` + +--- + +## What's Next + +- [Trial NGINX Controller with NGINX Plus]({{< relref "/controller/admin-guides/install/try-nginx-controller.md" >}}) + +  + +{{< versions "3.10" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/install-agent-non-root.md b/content/controller/admin-guides/install/install-agent-non-root.md new file mode 100644 index 000000000..22e30d56d --- /dev/null +++ b/content/controller/admin-guides/install/install-agent-non-root.md @@ -0,0 +1,106 @@ +--- +description: How to install the F5 NGINX Controller Agent to run as a non-root user. +docs: DOCS-253 +doctypes: +- beta +- tutorial +- troubleshooting +title: Install NGINX Controller Agent for Non-root Users +toc: true +weight: 205 +--- + +## Overview + +This document provides the instructions to run F5 NGINX Controller Agent as a non-root user, by making a few adjustments to the deployment process. + +  + +--- + +## Before You Begin + +Before you follow the steps to deploy and run the Controller Agent as a non-root user, [install NGINX Controller]({{< relref "/controller/admin-guides/install/install-nginx-controller" >}}) following the normal installation process. Once you reach the step **Install NGINX Controller Agent** follow the steps in this guide instead. + +  + +--- + +## Install NGINX Controller Agent to Run as a Non-root User + +Take the following steps to add an instance to NGINX Controller: + +1. Open the NGINX Controller user interface and log in. +1. Select the NGINX Controller menu icon, then select **Infrastructure**. +1. On the **Infrastructure** menu, select **Instances** > **Overview**. +1. On the **Instances** overview page, select **Create**. +1. 
On the **Create Instance** page, select **Add an existing instance**. +1. Add a name for the instance. If you don't provide a name, the hostname of the instance is used by default. +1. To add the instance to an existing Location, select a Location from the list. Or to create a Location, select **Create New**. + + {{< important >}} +Once set, the Location for an instance cannot be changed. If you need to change or remove the Location for an instance, you must [remove the instance from NGINX Controller]({{< relref "/controller/infrastructure/instances/manage-instances.md#delete-an-instance" >}}), and then add it back. + {{< /important >}} + +1. (Optional) By default, registration of NGINX Plus instances is performed over a secure connection. To use self-signed certificates with the Controller Agent, select **Allow insecure server connections to NGINX Controller using TLS**. For security purposes, we recommend that you secure the Controller Agent with signed certificates when possible. +1. Use SSH to connect and log in to the NGINX instance that you want to connect to NGINX Controller. +1. Copy the `curl` or `wget` command that's shown in the **Installation Instructions** section on the NGINX instance to download and install the Controller Agent package. When specified, the `-i` and `-l` options for the `install.sh` script refer to the instance name and Location, respectively. **You need to modify this command to use a non-root user** +1. Add the parameter `CONTROLLER_USER=''` to the `curl` or `wget` command, substituting the value in the brackets with your desired non-root user. +1. (Optional) Add the parameter `CONTROLLER_GROUP=''` to the `curl` or `wget` command, substituting the value in the brackets with your desired group. If this parameter is not set, a new group with the same name as the user will be created. +1. 
The `curl` or `wget` command looks similar to this example after applying the required changes: + + ```bash + curl -sS -L https:///install/controller-agent > install.sh && API_KEY='' CONTROLLER_USER='' CONTROLLER_GROUP='' -i -l + ``` + + {{< note >}} + +Make sure you enter the commands to download and run the `install.sh` script on the NGINX Plus system, and not on the NGINX Controller. + +NGINX Controller 3.6 and earlier require Python 2.6 or 2.7. You'll be prompted to install Python if it's not installed already. Python is not required for NGINX Controller v3.7 and later. + +If `CONTROLLER_USER` is not set, during the installation you will see the message `Installing agent to run as root` in red. + +Running agent as non-root changes the nap-syslog port to `5114` in both containerized and non-containerized instances. + + {{< /note >}} + +  + +After a few minutes, the NGINX instance will appear on the **Instances** overview page. + +For the NGINX Agent to run properly, NGINX Plus **must** be running as the same user and group as the Agent. To change the user and group NGINX Plus is running as after installing the agent: + +1. Manually edit the `/lib/systemd/system/nginx.service` file and under the `[Service]` block add the lines `User=` and `Group=` replacing the values in brackets with the values chosen during the installation. +1. Run `sudo chown -R : /etc/nginx/ /var/log/nginx/ /var/cache/nginx/` to change the permissions to your non-root user. +1. Ensure the ports NGINX is listening to are all above 1000: Check the NGINX `default.conf` file (usually `/etc/nginx/conf.d/default.conf`) and make sure that the `listen` values are all over `1000`. +1. 
(CentOS/RHEL) If you're installing the Controller Agent as a non-root user on CentOS or RHEL, make these additional changes:
+
+    - In the `[Service]` section of `/lib/systemd/system/nginx.service`, set the location for the `PIDfile` to:
+
+      ```nginx
+      [Service]
+      PIDFile=/var/tmp/nginx.pid
+      ```
+
+    - In `/etc/nginx/nginx.conf`, set the `pid` directive to:
+
+      ```nginx
+      pid /var/tmp/nginx.pid;
+      ```
+
+
+1. Run `sudo systemctl daemon-reload && sudo systemctl restart nginx` to pick up the new configuration.
+
+&nbsp;
+
+---
+
+## Verification Steps
+
+Run `top -u ` for your chosen user. The `/usr/bin/nginx-controller-agent` process will appear in the list of processes.
+
+
+{{< versions "3.16" "latest" "ctrlvers" >}}
+{{< versions "3.18" "latest" "apimvers" >}}
+{{< versions "3.20" "latest" "adcvers" >}}
diff --git a/content/controller/admin-guides/install/install-for-controller.md b/content/controller/admin-guides/install/install-for-controller.md
new file mode 100644
index 000000000..5d02707bc
--- /dev/null
+++ b/content/controller/admin-guides/install/install-for-controller.md
@@ -0,0 +1,533 @@
+---
+description: Take the steps in this guide to deploy F5 NGINX App Protect WAF as a datapath
+  instance for use with NGINX Controller.
+docs: DOCS-645
+doctypes:
+- task
+title: Using NGINX App Protect WAF with NGINX Controller
+toc: true
+weight: 500
+---
+
+**Note:** Refer to the [F5 NGINX Controller Technical Specifications]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md" >}}) guide to find out which distributions are supported for use with NGINX Controller and NGINX Controller Agent.
+ +## Setup + +Before proceeding, you should review the [Prerequisites]({{< relref "/nap-waf/v4/admin-guide/install#prerequisites" >}}), [Platform Security Considerations]({{< relref "/nap-waf/v4/admin-guide/install#platform-security-considerations" >}}) and [User Permissions]({{< relref "/nap-waf/v4/admin-guide/install#user-permissions" >}}) sections of the NGINX App Protect WAF Admin Guide. + + +## Install NGINX App Protect WAF + +**Note:** If a version of NGINX App Protect WAF prior to 3.6 is required, please contact the NGINX Sales team to assist with this configuration. + +{{}} + +{{%tab name="CentOS 7.4+"%}} + +1. If you already have NGINX packages in your system, back up your configs and logs: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +2. Create the `/etc/ssl/nginx/` directory: + + ```shell + sudo mkdir -p /etc/ssl/nginx + ``` + +3. Log in to the [NGINX Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + + **See Also:** You can use the [NGINX Controller REST API to download the key and cert files]({{< relref "/controller/admin-guides/install/get-n-plus-cert-and-key" >}}). + +4. Copy the above two files to the CentOS server's `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. + +5. Install prerequisite packages: + + ```shell + sudo yum install ca-certificates epel-release wget + ``` + +6. Remove any previously downloaded NGINX Plus repository file from /etc/yum.repos.d: + + ```shell + sudo rm /etc/yum.repos.d/nginx-plus-*.repo + ``` + +7. Add NGINX Plus repository by downloading the file nginx-plus-7.4.repo to /etc/yum.repos.d: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-7.4.repo + ``` + +8. 
Add NGINX App Protect WAF repository by downloading the file app-protect-7.repo to /etc/yum.repos.d: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-7.repo + ``` + +9. If NGINX Plus or NGINX App Protect WAF was previously installed on the system, clean up package manager cache information: + + ```shell + sudo yum clean all + ``` + +10. Install the latest NGINX App Protect WAF package. + + **See Also:** Please refer to [NGINX App Protect Compatibility Matrix]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md#nginx-app-protect-compatibility-matrix" >}}) for specific version compatibility. + + If you wish to install a specific version, please replace `app-protect` with the target version, for example `app-protect-25+3.671.0`: + + ```shell + sudo yum install app-protect + ``` + +11. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: + + ```shell + sudo nginx -v + ``` + +12. Configure SELinux as appropriate per your organization’s security policies. NGINX App Protect WAF applies the prebuilt SELinux policy module during the installation. If you encounter any issues, check the [Troubleshooting Guide]({{< relref "/nap-waf/v4/troubleshooting-guide/troubleshooting#selinux" >}}). + + **Note:** NGINX Controller has specific [requirements regarding SELinux configuration]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md#supported-distributions" >}}). + +13. Start the NGINX service: + + ```shell + sudo systemctl start nginx + ``` + +14. Start the `bd_agent` service (for Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2 only) + + If you plan to use this instance with Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2, you need to start the `bd_agent`: + + ```shell + /bin/su -s /bin/bash -c '/opt/app_protect/bin/bd_agent &' nginx + ``` + +15. 
Verify NGINX Plus and BD processes are running: + + ```shell + ps -ef | grep nginx + ps -ef | grep bd + ``` + + **Note:** If you plan to use this instance with Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2, also verify that `bd_agent` is running: + + ```shell + ps -ef | grep bd_agent + ``` + +16. To upgrade your signature package to the latest version and obtain the best protection, refer to [Updating App Protect Attack Signatures]({{< relref "/nap-waf/v4/admin-guide/install#centos--rhel-74--amazon-linux-2">}}). + +{{%/tab%}} + +{{%tab name="Red Hat Enterprise Linux 7.4+"%}} + +1. If you already have NGINX packages in your system, back up your configs and logs: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +2. Create the `/etc/ssl/nginx/` directory: + + ```shell + sudo mkdir -p /etc/ssl/nginx + ``` + +3. Log in to the [NGINX Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +4. Copy the above two files to the RHEL server’s `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. + +5. Install prerequisite packages: + + ```shell + sudo yum install ca-certificates wget + ``` + +6. Remove any previously downloaded NGINX Plus repository file from /etc/yum.repos.d: + + ```shell + sudo rm /etc/yum.repos.d/nginx-plus-*.repo + ``` + +7. Add NGINX Plus repository by downloading the file `nginx-plus-7.4.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-7.4.repo + ``` + +8. Add NGINX App Protect WAF repository by downloading the file app-protect-7.repo to /etc/yum.repos.d: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-7.repo + ``` + +9. 
Enable Yum repositories to pull App Protect dependencies: + + - Download the file `dependencies.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo + ``` + + - If you have a RHEL subscription: + + ```shell + sudo yum-config-manager --enable rhui-REGION-rhel-server-optional rhui-REGION-rhel-server-releases rhel-7-server-optional-rpms + ``` + + - If you don't have a RHEL subscription, you can pull the dependencies from the CentOS repository. + + Create a new repository, `centos.repo`, in `/etc/yum.repos.d/` with the content: + + ```shell + [centos] + name=CentOS-7 + baseurl=http://ftp.heanet.ie/pub/centos/7/os/x86_64/ + enabled=1 + gpgcheck=1 + gpgkey=http://ftp.heanet.ie/pub/centos/7/os/x86_64/RPM-GPG-KEY-CentOS-7 + ``` + +10. If NGINX Plus or NGINX App Protect WAF was previously installed on the system, clean up package manager cache information: + + ```shell + sudo yum clean all + ``` + +11. Install the latest NGINX App Protect WAF package. + + **See Also:** Please refer to [NGINX App Protect Compatibility Matrix]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md#nginx-app-protect-compatibility-matrix" >}}) for specific version compatibility. + + If you wish to install a specific version, please replace `app-protect` with the target version, for example `app-protect-25+3.671.0`: + + ```shell + sudo yum install app-protect + ``` + +12. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: + + ```shell + sudo nginx -v + ``` + +13. Configure SELinux as appropriate per your organization’s security policies. NGINX App Protect WAF applies the prebuilt SELinux policy module during the installation. If you encounter any issues, check the [Troubleshooting Guide]({{< relref "/nap-waf/v4/troubleshooting-guide/troubleshooting#selinux" >}}). 
+ + **Note:** NGINX Controller has specific [requirements regarding SELinux configuration]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md#supported-distributions" >}}). + +14. Start the NGINX service: + + ```shell + sudo systemctl start nginx + ``` + +15. Start the `bd_agent` service (for Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2 only) + + If you plan to use this instance with Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2, you need to start the `bd_agent`: + + ```shell + /bin/su -s /bin/bash -c '/opt/app_protect/bin/bd_agent &' nginx + ``` + +16. Verify NGINX Plus and BD processes are running: + + ```shell + ps -ef | grep nginx + ps -ef | grep bd + ``` + + **Note:** If you plan to use this instance with Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2, also verify that `bd_agent` is running: + + ```shell + ps -ef | grep bd_agent + ``` + +17. To upgrade your signature package to the latest version and obtain the best protection, refer to [Updating App Protect Attack Signatures]({{< relref "/nap-waf/v4/admin-guide/install#centos--rhel-74--amazon-linux-2" >}}). + +{{%/tab%}} + +{{%tab name="Debian"%}} + +**Note:** As of NGINX Plus R24, support for Debian 9 is no longer available. As a consequence, NGINX App Protect WAF 3.1 is the final version available for this operating system version. + +1. If you already have NGINX packages in your system, back up your configs and logs: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +2. Create the `/etc/ssl/nginx/` directory: + + ```shell + sudo mkdir -p /etc/ssl/nginx + ``` + +3. Log in to the [NGINX Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +4. Copy the above two files to the Debian server’s `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. 
+ +5. Install apt utils: + + ```shell + sudo apt-get install apt-transport-https lsb-release ca-certificates wget + ``` + +6. Download and add the NGINX signing key: + + ```shell + sudo wget https://cs.nginx.com/static/keys/nginx_signing.key && sudo apt-key add nginx_signing.key + ``` + +7. Remove any previous NGINX Plus repository and apt configuration files: + + ```shell + sudo rm /etc/apt/sources.list.d/nginx-plus.list + sudo rm /etc/apt/apt.conf.d/90nginx + ``` + +8. Add NGINX Plus repository: + + ```shell + printf "deb https://pkgs.nginx.com/plus/debian `lsb_release -cs` nginx-plus\n" | sudo tee /etc/apt/sources.list.d/nginx-plus.list + ``` + +9. Add NGINX App Protect WAF repository: + + ```shell + printf "deb https://pkgs.nginx.com/app-protect/debian `lsb_release -cs` nginx-plus\n" | sudo tee /etc/apt/sources.list.d/nginx-app-protect.list + ``` + +10. Download the apt configuration to `/etc/apt/apt.conf.d`: + + ```shell + sudo wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx + ``` + +11. Update the repository and install the latest supported NGINX App Protect WAF packages. + + **See Also:** Please refer to [NGINX App Protect Compatibility Matrix]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md#nginx-app-protect-compatibility-matrix" >}}) for specific version compatibility. + + ```shell + sudo apt-get update + sudo apt-get install app-protect + ``` + + To install a specific version based on the NGINX Plus version, for example `r25`, follow these steps: + + ```shell + sudo apt-cache policy app-protect | grep 25+ + 25+3.760.0-1~buster 500 + 25+3.733.0-1~buster 500 + 25+3.671.0-1~buster 500 + + sudo apt-get install app-protect=25+3.671.0-1~buster + ``` + +12. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: + + ```shell + sudo nginx -v + ``` + +13. Start the NGINX service: + + ```shell + sudo systemctl start nginx + ``` + +14. 
Start the `bd_agent` service (for Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2 only) + + If you plan to use this instance with Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2, you need to start the `bd_agent`: + + ```shell + /bin/su -s /bin/bash -c '/opt/app_protect/bin/bd_agent &' nginx + ``` + +15. Verify NGINX Plus and BD processes are running: + + ```shell + ps -ef | grep nginx + ps -ef | grep bd + ``` + + **Note:** If you plan to use this instance with Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2, also verify that the `bd_agent` is running: + + ```shell + ps -ef | grep bd_agent + ``` + +16. To upgrade your signature package to the latest version and obtain the best protection, refer to [Updating App Protect Attack Signatures]({{< relref "/nap-waf/v4/admin-guide/install#debian-10" >}}). + +{{%/tab%}} + +{{%tab name="Ubuntu"%}} + +1. If you already have NGINX packages in your system, back up your configs and logs: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +2. Create the `/etc/ssl/nginx/` directory: + + ```shell + sudo mkdir -p /etc/ssl/nginx + ``` + +3. Log in to the [NGINX Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +4. Copy the above two files to the Ubuntu server’s `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. + +5. Install apt utils: + + ```shell + sudo apt-get install apt-transport-https lsb-release ca-certificates wget + ``` + +6. Download and add the NGINX signing key: + + ```shell + sudo wget https://cs.nginx.com/static/keys/nginx_signing.key && sudo apt-key add nginx_signing.key + ``` + +7. Remove any previous NGINX Plus repository and apt configuration files: + + ```shell + sudo rm /etc/apt/sources.list.d/nginx-plus.list + sudo rm /etc/apt/apt.conf.d/90nginx + ``` + +8. 
Add NGINX Plus repository: + + ```shell + printf "deb https://pkgs.nginx.com/plus/ubuntu `lsb_release -cs` nginx-plus\n" | sudo tee /etc/apt/sources.list.d/nginx-plus.list + ``` + +9. Add NGINX App Protect WAF repository: + + ```shell + printf "deb https://pkgs.nginx.com/app-protect/ubuntu `lsb_release -cs` nginx-plus\n" | sudo tee /etc/apt/sources.list.d/nginx-app-protect.list + ``` + +10. Download the apt configuration to `/etc/apt/apt.conf.d`: + + ```shell + sudo wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx + ``` + +11. Update the repository and install the latest App Protect WAF package. + + **See Also:** Please refer to [NGINX App Protect Compatibility Matrix]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md#nginx-app-protect-compatibility-matrix" >}}) for specific version compatibility. + + ```shell + sudo apt-get update + sudo apt-get install app-protect + ``` + + To install a specific version based on the NGINX Plus version, for example `r25`, follow these steps: + + ```shell + sudo apt-cache policy app-protect | grep 25+ + 25+3.760.0-1~bionic 500 + 25+3.733.0-1~bionic 500 + 25+3.671.0-1~bionic 500 + + sudo apt-get install app-protect=25+3.671.0-1~bionic + ``` + +12. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: + + ```shell + sudo nginx -v + ``` + +13. Start the NGINX service: + + ```shell + sudo systemctl start nginx + ``` + +14. Start the `bd_agent` service (for Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2 only) + + If you plan to use this instance with Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2, you need to start the `bd_agent`: + + ```shell + /bin/su -s /bin/bash -c '/opt/app_protect/bin/bd_agent &' nginx + ``` + +15. 
Verify NGINX Plus and BD processes are running: + + ```shell + ps -ef | grep nginx + ps -ef | grep bd + ``` + + **Note:** If you plan to use this instance with Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2, also verify that `bd_agent` is running: + + ```shell + ps -ef | grep bd_agent + ``` + +16. To upgrade your signature package to the latest version and obtain the best protection, refer to [Updating App Protect Attack Signatures]({{< relref "/nap-waf/v4/admin-guide/install#ubuntu-1804" >}}). + + **Note:** Ubuntu 20.04 activates **AppArmor** by default, but NGINX App Protect WAF will run in unconfined mode after being installed as it is shipped with no AppArmor profile. To benefit from AppArmor access control capabilities for NGINX App Protect WAF, you will have to write your own AppArmor profile for NGINX App Protect WAF executables found in `/opt/app_protect/bin` such that it best suits your environment. + +{{%/tab%}} + +{{%tab name="Amazon Linux 2 LTS"%}} + +Using NGINX App Protect WAF with NGINX Controller isn't supported on Amazon Linux 2 LTS. + +{{%/tab%}} + +{{%tab name="Alpine"%}} + +Using NGINX App Protect WAF with NGINX Controller isn't supported on Alpine. + +{{%/tab%}} +{{}} + +
+ +## Add NGINX App Protect WAF to NGINX Controller + +If this NGINX Plus instance is already managed by Controller, [restart the Agent]({{< relref "/controller/admin-guides/install/agent-restart" >}}) after NGINX App Protect WAF is installed. + +Otherwise, complete the tasks in the NGINX Controller [Add an NGINX App Protect WAF Instance]({{< relref "/controller/infrastructure/instances/add-nap-instance.md#add-the-nginx-app-protect-instance" >}}) guide. + +## Use NGINX App Protect WAF with NGINX Controller + +**Note:** When configuring NGINX App Protect WAF as a datapath instance for NGINX Controller, **you should not modify the `nginx.conf` file**. The `nginx.conf` file will be automatically updated when enabling WAF on a Component in NGINX Controller. + +Refer to the following NGINX Controller user guides for further information about how to secure your apps and/or APIs with NGINX Controller: + +- [Learn about App Security for the NGINX Controller App Delivery module]({{< relref "/controller/app-delivery/security/concepts/what-is-waf" >}}) +- [Add Security to your Apps with the NGINX Controller App Delivery module]({{< relref "/controller/app-delivery/security/tutorials/add-app-security-with-waf" >}}) +- [Add Advanced Security (WAF) to your APIs with the NGINX Controller API Management module]({{< relref "/controller/api-management/manage-apis.md#define-the-routing-rules" >}}). diff --git a/content/controller/admin-guides/install/install-nginx-controller-agent.md b/content/controller/admin-guides/install/install-nginx-controller-agent.md new file mode 100644 index 000000000..7378817ba --- /dev/null +++ b/content/controller/admin-guides/install/install-nginx-controller-agent.md @@ -0,0 +1,184 @@ +--- +description: How to install, upgrade, and uninstall the F5 Controller Agent. 
+docs: DOCS-254 +doctypes: +- tutorial +- troubleshooting +tags: +- docs +title: Install NGINX Controller Agent +toc: true +weight: 150 +--- + +## Overview + +This page shows how to install, update, and uninstall the F5 NGINX Controller Agent. + +You can use the NGINX Controller Agent to monitor your systems with the NGINX Controller. + +## Objectives + +- Install the NGINX Controller Agent +- Upgrade the NGINX Controller Agent to a newer version +- Uninstall the NGINX Controller Agent + +## Install the NGINX Controller Agent + +{{< see-also >}} If you want to run the NGINX Controller Agent as a non-root user, follow the alternative instructions in the [Install NGINX Controller Agent for Non-root User]({{< relref "/controller/admin-guides/install/install-agent-non-root.md" >}}) guide instead of the steps provided in this section. {{< /see-also >}} + +Take the following steps to add an instance to NGINX Controller: + +1. Open the NGINX Controller user interface and log in. +2. Select the NGINX Controller menu icon, then select **Infrastructure**. +3. On the **Infrastructure** menu, select **Instances** > **Overview**. +4. On the **Instances** overview page, select **Create**. +5. On the **Create Instance** page, select **Add an existing instance**. +6. Add a name for the instance. If you don't provide a name, the hostname of the instance is used by default. +7. To add the instance to an existing [Instance Group]({{< relref "/controller/infrastructure/instances/manage-instances.md#instance-groups" >}}), select an Instance Group from the list. Or to create an Instance Group, select **Create New**. +8. To add the instance to an existing Location, select a Location from the list. Or to create a Location, select **Create New**. + + {{< important >}} +Once set, the Location for an instance cannot be changed. 
If you need to change or remove the Location for an instance, you must [remove the instance from NGINX Controller]({{< relref "/controller/infrastructure/instances/manage-instances.md#delete-an-instance" >}}), and then add it back. + {{< /important >}} + + {{< important >}} +Instances and the instance groups they belong to should specify the same location; however, this requirement is not currently enforced. If different locations are specified, the instance group's location takes precedence. This is important to remember when [assigning locations to workload groups]({{< relref "/controller/app-delivery/manage-apps.md#workload-groups">}}). + {{< /important >}} + +9. (Optional) By default, registration of NGINX Plus instances is performed over a secure connection. To use self-signed certificates with the Controller Agent, select **Allow insecure server connections to NGINX Controller using TLS**. For security purposes, we recommend that you secure the Controller Agent with signed certificates when possible. +10. Use SSH to connect and log in to the NGINX instance that you want to connect to NGINX Controller. +11. Run the `curl` or `wget` command that's shown in the **Installation Instructions** section on the NGINX instance to download and install the Controller Agent package. When specified, the `-i` and `-l` options for the `install.sh` script refer to the instance name and Location, respectively. + + {{< note >}} + +Make sure you enter the commands to download and run the `install.sh` script on the NGINX Plus system, and not on the NGINX Controller. + +NGINX Controller 3.6 and earlier require Python 2.6 or 2.7. You'll be prompted to install Python if it's not installed already. Python is not required for NGINX Controller v3.7 and later. + + {{< /note >}} + +After a few minutes, the NGINX instance will appear on the **Instances** overview page. 
+ + +## Update the NGINX Controller Agent + +When you [update NGINX Controller]({{< relref "/controller/admin-guides/install/install-nginx-controller.md#update-nginx-controller" >}}), you also need to update the NGINX Controller Agent software on each monitored NGINX Plus instance. + +To update the NGINX Controller Agent, take the following steps: + +1. Open the NGINX Controller user interface and log in. +1. Select the NGINX Controller menu icon, then select **Infrastructure**. +1. On the **Infrastructure** menu, select **Instances** > **Overview**. +1. On the **Instances** overview page, select **Create**. +1. Follow the instructions in the **Install Instructions** pane to connect to the NGINX instance and install the updated Controller Agent package. + + {{< note >}} + +NGINX Controller 3.6 and earlier require Python 2.6 or 2.7. You'll be prompted to install Python if it's not installed already. Python is not required for NGINX Controller 3.7 and later. + + {{< /note >}} + + +## Uninstall the Analytics, Visibility, and Reporting Daemon (AVRD) + +NGINX Controller uses an [Analytics, Visibility, and Reporting daemon (AVRD)]({{< relref "/controller/analytics/metrics/overview-metrics-metadata.md" >}}) to aggregate and report app-centric metrics. You can use these metrics to monitor your apps' performance and health. + +To uninstall AVRD and the supporting modules, run the following command on each dataplane instance: + +- Debian/Ubuntu + + ```bash + sudo apt-get purge avrd nginx-plus-module-metrics avrd-libs + ``` + +- RedHat/CentOS + + ```bash + sudo yum remove avrd avrd-metrics nginx-plus-module-metrics + ``` + + +## Uninstall the NGINX Controller Agent and Delete an Instance + +Take the following steps to uninstall the Controller Agent and delete an instance. + +{{< important >}}Be sure to uninstall the Controller Agent first, before you delete an instance. 
If you don't uninstall the Controller Agent first, the instance may reappear in NGINX Controller after it has been deleted.{{< /important >}} + +1. On your NGINX Plus instance, stop the Controller Agent service: + + - On Ubuntu/Debian: + + ```bash + service controller-agent stop + ``` + + - On CentOS/Red Hat Enterprise Linux: + + ```bash + systemctl stop controller-agent + ``` + +1. Run the appropriate command for your distribution to uninstall the Controller Agent: + + - On Ubuntu/Debian: + + ``` bash + apt-get purge nginx-controller-agent + ``` + + - On CentOS/Red Hat Enterprise Linux: + + ``` bash + yum remove nginx-controller-agent + ``` + + After the package is removed, you can safely delete the files in `/etc/controller-agent/` and `/var/log/nginx-controller/`. + +1. (Optional) If you use SELinux on CentOS or Red Hat Enterprise Linux, take the following steps to remove the SELinux policy that was created when the Controller Agent was installed: + + 1. Revert the installed permissions: + + ```bash + sudo semodule -r nginx + ``` + + 1. Remove the following files: + + - `nginx.te` + - `nginx.mod` + - `nginx.pp` + +1. Delete the NGINX Plus instance from the NGINX Controller user interface: + + 1. Open the NGINX Controller user interface and log in. + + 1. Select the NGINX Controller menu icon, then select **Infrastructure**. + + 1. On the **Infrastructure** menu, select **Instances** > **Overview**. + + 1. On the **Instances** overview page, select the NGINX Plus instance that you want to delete. + + 1. Select the delete icon (trash can). + +1. Delete alerts: + + {{< note >}}When you delete an instance, any related alerts for that instance are not deleted automatically. You can delete the alerts manually, however.{{< /note >}} + + 1. Open the NGINX Controller user interface and log in. + 2. On the Analytics menu, select **Alerts > Alert Rules**. + 3. Select the alert rule that you want to delete. + 4. Select the delete (trash can) icon to delete the alert rule. 
+ 5. Select **Delete** in the pop-up box to confirm that you want to proceed. + + +## What's Next + +- [Customize how the NGINX Controller Agent collects metrics]({{< relref "/controller/admin-guides/config-agent/configure-the-agent.md" >}}) +- [Start or Stop the Agent Service]({{< relref "/controller/admin-guides/install/agent-restart.md" >}}) +- [Manage your NGINX Instances]({{< relref "/controller/infrastructure/instances/manage-instances.md" >}}) +- [Manage Locations for your Instances]({{< relref "/controller/infrastructure/locations/manage-locations.md" >}}) + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/install-nginx-controller-rhel-8.md b/content/controller/admin-guides/install/install-nginx-controller-rhel-8.md new file mode 100644 index 000000000..a0c17f1a0 --- /dev/null +++ b/content/controller/admin-guides/install/install-nginx-controller-rhel-8.md @@ -0,0 +1,160 @@ +--- +description: This guide explains how to prepare your RHEL 8 system to install F5 NGINX + Controller and the NGINX Controller Agent. +docs: DOCS-342 +doctypes: +- beta +- tutorial +tags: +- docs +title: Install NGINX Controller on RHEL 8 (experimental) +toc: true +weight: 200 +--- + +## Preparing the F5 NGINX Controller Host + +To install NGINX Controller on RHEL 8, you must complete the following steps to allow iptables-based routing for Kubernetes. Failure to complete these steps may cause the installation to hang. + +### Update System Packages + +1. Before completing any other steps, update the packages on your system: + + ```bash + sudo yum -y upgrade + ``` + +### Install and Configure Docker + +Docker isn't available on RedHat 8 by default, so you'll need to add a Docker repository and install the required packages: + +1. 
Add the Docker repo: + + ```bash + sudo yum config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo + ``` + +1. Install the Docker packages: + + ```bash + sudo yum install docker-ce-3:19.03.15-3.el8 docker-ce-cli-1:19.03.15-3.el8 containerd.io-1.3.9-3.1.el8 + ``` + +1. Set up the Docker daemon: + + ```bash + sudo mkdir -p /etc/docker + + sudo vi /etc/docker/daemon.json + ``` + + Paste the following JSON snippet into `daemon.json`: + + ```json + { + "exec-opts": ["native.cgroupdriver=systemd"], + "log-driver": "json-file", + "log-opts": { + "max-size": "10m", + "max-file": "2" + }, + "storage-driver": "overlay2" + } + ``` + +1. Run the following commands to set up the Docker service: + + ```bash + sudo systemctl start docker.service + + sudo systemctl status docker.service + + sudo systemctl enable docker.service + ``` + +### Install Required Packages and Kernel Modules + +Take the following steps to install the required packages and kernel modules. + +1. Install the traffic control utility: + + ``` bash + sudo yum install iproute-tc + ``` + +1. Run the following commands to ensure the required kernel modules are loaded at startup: + + ```bash + cat <}}). + +## Preparing the Data Plane Host + +1. For the NGINX Controller Agent to work on RHEL 8, you need to install the following package on each data plane host: + + ``` bash + sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + sudo dnf install -y xerces-c + ``` + +2. Complete the steps in the NGINX Controller Agent Installation guide to [install the NGINX Controller Agent]({{< relref "/controller/admin-guides/install/install-nginx-controller-agent" >}}). 
+ +## Troubleshooting + +You may encounter the following error when installing or updating NGINX Controller on RHEL 8: + +``` text +Status code: 403 for https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/repodata/repomd.xml +``` + +In this case, update your subscription manager on each RHEL 8 host as follows: + +```bash +sudo subscription-manager refresh +``` diff --git a/content/controller/admin-guides/install/install-nginx-controller.md b/content/controller/admin-guides/install/install-nginx-controller.md new file mode 100644 index 000000000..a6c359768 --- /dev/null +++ b/content/controller/admin-guides/install/install-nginx-controller.md @@ -0,0 +1,666 @@ +--- +description: This guide explains how to install and update F5 NGINX Controller. +docs: DOCS-255 +doctypes: +- tutorial +tags: +- docs +title: Install NGINX Controller +toc: true +weight: 120 +--- + +## Overview + +F5 NGINX Controller is NGINX's control-plane solution that manages the NGINX data plane. Built on a modular architecture, NGINX Controller enables you to manage the entire lifecycle of NGINX Plus, whether it's deployed as a load balancer, API gateway, or a proxy in a service mesh environment. + +To get started, download and run the installer. The installer will: + +- Perform prerequisite checks on your system and prompt for any missing dependencies. +- Prompt you to accept the terms of service agreement for NGINX Controller. +- Ask you for a series of parameters including Database, SMTP, Admin user, and FQDN settings. +- Place configuration and log files in appropriate file locations on your host system. +- Add extra repositories to the default package manager like `apt` or `yum` and install required packages. +- Launch NGINX Controller. + +  + +--- + +### Open Source Software Dependencies + +NGINX Controller uses a number of open source software packages in the product. 
You can find information about these dependencies in the [NGINX Controller Technical Specifications]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md" >}}). + +  + +--- + +## Before You Begin + +Before installing NGINX Controller, review the following prerequisites. + +{{< important >}} +NGINX Controller should be deployed on a secure, internal network only. We strongly recommend against exposing the NGINX Controller API to the internet. +{{< /important >}} + +Things you'll need before installing NGINX Controller: + +- The `controller-installer-<version>.tar.gz` package, downloaded from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads); + +- A license file for NGINX Controller, accessible via the [MyF5 Customer Portal](https://account.f5.com/myf5); + +- A dedicated environment (bare metal, VM, or cloud-hosted instance) on which to install NGINX Controller. For the supported Operating Systems and recommended specifications, see the [NGINX Controller Technical Specifications]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs" >}}) guide; + +  + +--- + +## Install NGINX Controller Prerequisites + +You can use the NGINX Controller `helper.sh prereqs` command to install the required system packages and Docker CE. + + + +| Options | Description | +|----------|-------------| +| `base` | Install the required Linux utilities. | +| `docker` | Install Docker CE. | +| `nfs` | Install NFS system packages. | + +To install all of the NGINX Controller prerequisites for your system at the same time, take the following steps: + +1. Download the NGINX Controller installer package from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). + +1. Extract the installer package files: + + ```bash + tar xzf controller-installer-<version>.tar.gz + ``` + +1. 
Run the helper script with the `prereqs` option: + + ```bash + cd controller-installer + ./helper.sh prereqs + ``` + +{{< note >}} +After you've installed NGINX Controller, you can install any of the prerequisites by running the following command: + + ```bash +/opt/nginx-controller/helper.sh prereqs [base|docker|nfs] +``` + +{{< /note >}} + +  + +--- + +### Linux Utilities + +The following Linux utilities are required by the installation script. The script will let you know if any of the utilities are missing. + +- `awk` +- `bash` (4.0 or later) +- `conntrack` +- `coreutils`: `base64`, `basename`, `cat`, `comm`, `dirname`, `head`, `id`, `mkdir`, `numfmt`, `sort`, `tee` +- `curl` or `wget` +- `ebtables` +- `envsubst` (provided by the `gettext` package) +- `ethtool` +- `getent` +- `grep` +- `gunzip` (provided by the `gzip` package) +- `iproute` +- `iptables` +- `jq` (1.5 or later) +- `less` +- `openssl` +- `sed` +- `socat` +- `tar` +- `util-linux` +- `yum-plugin-versionlock` on RedHat/CentOS + +  + +--- + +### Docker Requirements + +If you have Internet access, NGINX Controller will install Docker for you as part of the installation process. + +If you prefer to install Docker on the host yourself, install the following: + +- [Docker Community Edition (CE)](https://docs.docker.com/engine/install/) 18.09 +- [Containerd.io](https://containerd.io/) 1.2.10 + +If you are using Ubuntu-20.04 and want to install Docker on your own, choose the following versions instead: + +- [Docker Community Edition (CE)](https://docs.docker.com/engine/install/ubuntu/) 19.03 +- [Containerd.io](https://containerd.io/) 1.2.13 + +{{< see-also >}} +For instructions on installing Docker in offline scenarios on CentOS/RHEL 7, refer to the AskF5 [K84431427](https://support.f5.com/csp/article/K84431427) knowledge base article.{{< /see-also >}} + +{{< important >}} You need to enable Docker log rotation to ensure that the logs don't consume all the free disk space on the server. 
For instructions on how to enable Docker log rotation, see the Docker guides [Configure logging drivers](https://docs.docker.com/config/containers/logging/configure/) and [JSON File logging driver](https://docs.docker.com/config/containers/logging/json-file/).{{< /important >}}  + +#### Red Hat Enterprise Linux + +To create container images on Red Hat Enterprise Linux, Red Hat requires you to register and entitle the host computer on which you'll build them. In this case, the host is where you're installing NGINX Controller. Once the host is registered with Red Hat, you can install Docker from the Red Hat Enterprise Linux Extras repository. See the [Red Hat "Getting Started with Containers"](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_containers/index#getting_docker_in_rhel_7) guide for instructions. + + +  + +--- + +### Kubernetes Requirements + +NGINX Controller ships with a required version of Kubernetes and will install Kubernetes for you. Be sure to install NGINX Controller on a dedicated node that **does not** already have Kubernetes configured. + +The following table lists the Kubernetes versions that are installed by NGINX Controller: + + +| NGINX Controller | Kubernetes | +|-----------------------|--------------------| +| v3.x | v1.15.5 | + +The [Kubernetes Pod DNS config](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config) has a limit of six configured DNS search domain names. This is also the [`glibc` limit](https://man7.org/linux/man-pages/man5/resolv.conf.5.html). + +In NGINX Controller, Core-DNS creates three search domains that are determined at run-time and not in `/etc/resolv.conf`: + +- `.svc.cluster.local` +- `svc.cluster.local` +- `cluster.local ` + +In general, changing the settings in NGINX Controller's underlying Kubernetes cluster is not recommended. 
However, if you do change the cluster's Pod config to allow additional search domains, **you should not add more than three domains**. + + +  + +--- + +### PostgreSQL (Optional) + +When installing NGINX Controller, you can choose to have NGINX Controller install and manage a self-hosted -- also known as "embedded" -- [PostgreSQL](https://www.postgresql.org/) config database for you; this is the recommended implementation. If you choose to use the embedded, self-hosted config database, you can skip this section. + +Alternatively, you can install your own PostgreSQL database for the config database, which you manage; this is sometimes referred to as an "external config database" because it is externally managed by you. Continue reading if you're providing your own PostgreSQL database. + +Refer to the AskF5 KB article [K49481224](https://support.f5.com/csp/article/K49481224) for instructions on how to install PostgreSQL on CentOS 7 and Ubuntu 18.04 for use with NGINX Controller. + +- NGINX Controller supports the following versions of PostgreSQL: + + - PostgreSQL 12.x -- works with NGINX Controller 3.9 and later. + - PostgreSQL 9.5 -- works with NGINX Controller 3.0 and later. + +- The PostgreSQL database must be accessible from the NGINX Controller server. You can use a DNS-resolvable name or an IP address to connect to the database server (names in `/etc/hosts` are not allowed). +- Create the user with the `Create DB` permission. +- Configure PostgreSQL to allow SSL connections; client certificates should also be used for user authentication. 
+ + **We strongly discourage disabling SSL for PostgreSQL for security reasons.** Consult the *Secure TCP/IP Connections with SSL* topic in the PostgreSQL manual for instructions and details: + + - [PostgreSQL 9.5](https://www.postgresql.org/docs/9.5/ssl-tcp.html) + - [PostgreSQL 12.x](https://www.postgresql.org/docs/12/ssl-tcp.html) + +- When installed on external NFS or EFS volumes, the config database should support a throughput of 2 MiB/s or greater. + + +  + +--- + +## Install NGINX Controller + +Install NGINX Controller on a dedicated node that **does not** already have Kubernetes configured. NGINX Controller does not support pre-configured Kubernetes implementations at this time. The installer for NGINX Controller will install and configure Kubernetes for you. + +{{< important >}}Before installing NGINX Controller, you must **disable swap on the host**; this is required by Kubernetes in order for the kubelet to work properly. Refer to your Linux distribution documentation for specific instructions for disabling swap for your system. For more information about this requirement, see the AskF5 knowledge base article [K82655201](https://support.f5.com/csp/article/K82655201) and the [kubeadm installation guide](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) in the Kubernetes documentation.{{< /important >}} + +{{< caution >}}**For RHEL 8 deployments**, complete the additional prerequisite steps in the [Installing NGINX on RHEL 8]({{< relref "/controller/admin-guides/install/install-nginx-controller-rhel-8.md" >}}) guide before installing NGINX Controller. RHEL 8 support is a **beta** feature.{{< /caution >}} + +To install NGINX Controller, take the following steps: + +1. Download the NGINX Controller installer package from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). +1. Extract the installer package files: + + ```bash + tar xzf controller-installer-<version>.tar.gz + ``` + +1. 
Run the install script:
+
+   ```bash
+   cd controller-installer
+   ./install.sh
+   ```
+
+   {{< important >}}Installing NGINX Controller as `root` is **not supported** on multi-node clusters. Instead, create a user with `sudo` permission for installing and performing all operations with NGINX Controller. Further, NGINX Controller scripts should also run with this dedicated user; scripts shouldn't be run as `sudo`, `sudo su`, or as the `root` user directly.{{< /important >}}
+
+   {{< note >}}If an HTTPS proxy is configured for the whole system, you should disable the proxy for the IP address and hostname of the host that you're running the NGINX Controller install script on.
+   For example, run the command `export NO_PROXY=<IP_address>,<hostname>`. {{< /note >}}
+
+   The installation script walks through a series of steps and asks for the following input:
+
+   - **Config database configuration**. Specify whether to use an embedded, self-hosted PostgreSQL database for the config database, or if you want to provide your own external PostgreSQL database. If you choose to provide your own database, make sure you've reviewed the [PostgreSQL prerequisites](#postgresql-optional).
+   - **Config database volume type**: Specify the type of volume to use to store the config database: local, NFS, or AWS. We recommend choosing `local` only for demo and trial purposes.
+
+     {{< see-also >}}Refer to the [NGINX Controller Technical Specifications Guide]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md#local-or-external-storage" >}}) for more information about the volume options and requirements.{{< /see-also >}}
+
+   - **Analytics database volume type**: Specify the type of volume to use to store the analytics database: local, NFS, or AWS. We recommend choosing `local` for demo and trial purposes.
+   - **EULA**: Read the end-user license agreement. Type either `y` to accept or `n` to exit.
+   - **SMTP**
+     - **SMTP Host**: Provide the host name or IP address of an SMTP server. 
This is used to send password recovery emails. For trial purposes, if you don't need to receive these communications, you can enter a value of "example.com" or something similar.
+     - **SMTP Port**: The port of the SMTP server.
+     - **SMTP Authentication**: Select `y` or `n` to authenticate when connecting to the SMTP server.
+     - **Use TLS for SMTP Communication**: Select `y` or `n` to use SSL for SMTP server connections.
+     - **Do not reply email address**: The sender's email address. For example, `donotreply@example.com`.
+   - **Admin**
+     - **First name**: The first name for the initial admin user.
+     - **Last name**: The last name for the initial admin user.
+     - **Email address**: The contact email address for the initial admin user.
+     - **Password**: The initial admin's password. Passwords must be 6-64 characters long and must include letters and digits.
+   - **FQDN**: Fully qualified domain name (FQDN) -- a resolvable domain name for the NGINX Controller server. The FQDN is used by Controller Agents when connecting to NGINX Controller.
+     {{< note >}}We recommend setting the FQDN to an internal address when possible, to avoid exposing the traffic between the Agent and NGINX Controller. This also reduces the external traffic in cloud environments. {{< /note >}}
+   - **SSL/TLS certificates**: Type `y` to generate and use self-signed certs for running NGINX Controller over HTTPS, or type `n` to provide your own certs.
+
+     {{< important >}}If you provide your own SSL/TLS certificates, you'll need a complete certificate chain file, with the intermediate CA cert appended to the server cert; the server certificate must appear **before** the chained certificates in the combined file. If the certificate contains a wildcard Common Name (CN=*.example.com) it must also contain a Subject Alternate Name (SAN=nginx-controller.example.com). {{< /important >}}
+
+1. 
Log in to the NGINX Controller browser interface by navigating to the DNS, FQDN, or IP address of the NGINX Controller host, for example, `https://<Controller-FQDN>/login`. Use the admin email address and password that you provided during the installation process.
+
+1. Once the NGINX Controller installation has completed, you may safely delete the installer package that you downloaded and extracted.
+
+
+&nbsp;
+
+---
+
+## License NGINX Controller
+
+To add a license to NGINX Controller, take the following steps:
+
+1. Go to `https://<Controller-FQDN>/platform/license` and log in.
+1. In the **Upload a license** section, select an upload option:
+
+   - **Upload license file** -- Locate and select your license file in the file explorer.
+   - **Paste your Association Token or license file** -- Paste your customer Association Token or the contents of your NGINX Controller license file. These are available on the [MyF5 Customer Portal](https://account.f5.com/myf5).
+
+1. Select **Save license**.
+
+{{< see-also >}}
+To add a license using the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}), send a PUT request to the `/platform/license` endpoint. Provide your CAT or NGINX Controller license as a base64-encoded string in the JSON request body.
+{{< /see-also >}}
+
+
+&nbsp;
+
+---
+
+## Back Up Cluster Config and Encryption Keys
+
+After installing NGINX Controller, you should back up the cluster config and encryption keys. You'll need these if you ever need to restore the NGINX config database on top of a new NGINX Controller installation.
+
+- To back up the NGINX Controller cluster configuration and encryption keys:
+
+  ```bash
+  /opt/nginx-controller/helper.sh cluster-config save
+  ```
+
+  The file is saved to `/opt/nginx-controller/cluster-config.tgz`. 
+
+- To restore the cluster's config and encryption keys, run the following command:
+
+  ```bash
+  /opt/nginx-controller/helper.sh cluster-config load
+  ```
+
+&nbsp;
+
+---
+
+## Manage the NGINX Controller Process
+
+You can use the `helper.sh` script to start, stop, restart, and check the status of the NGINX Controller process.
+
+``` bash
+/opt/nginx-controller/helper.sh controller start
+/opt/nginx-controller/helper.sh controller stop
+/opt/nginx-controller/helper.sh controller restart
+/opt/nginx-controller/helper.sh controller status
+```
+
+&nbsp;
+
+---
+
+## Update NGINX Controller
+
+To update the NGINX Controller software, take the steps below. When complete, you must also update the Controller Agent software on each monitored NGINX Plus instance.
+
+When updating NGINX Controller on a multi-node cluster, run the `update.sh` script on each node individually -- the order in which you update the nodes doesn't matter.
+
+{{< warning >}} Do not update the nodes in a multi-node cluster in parallel. Doing so may result in race conditions for certain jobs, such as database migrations, and may cause the cluster to become unavailable.{{< /warning >}}
+
+{{< caution >}}
+We strongly recommend that you make a backup of the following information before proceeding, to avoid potential data and/or configuration loss:
+
+- [Back up the NGINX Controller databases]({{< relref "/controller/admin-guides/backup-restore" >}}).
+- Back up the NGINX Controller cluster configuration and encryption keys. These are required if you need to restore the config database on top of a new installation of NGINX Controller.
+
+  ```bash
+  /opt/nginx-controller/helper.sh cluster-config save
+  ```
+
+- Back up the Controller Agent `agent.conf` file by copying it from its current location to a new location. This file is present on each NGINX Plus instance.
+
+  ```bash
+  cp /etc/controller-agent/agent.conf <temporary location>
+  ```
+
+{{< /caution >}}
+
+1. 
Download the installer package from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). + +1. Extract the installer package files: + + ```bash + tar xzf controller-installer-.tar.gz + ``` + +1. Before updating, check the NGINX Controller status to confirm the installation is healthy. + + ```bash + ./helper.sh controller status + ``` + + Resolve any degradations before updating. + +1. Run the update script: + + ```bash + cd controller-installer + ./update.sh + ``` + + {{< note >}}If you're upgrading from an older version of NGINX Controller and you installed Controller as root user, use `--allow-with-root` flag when running an update script. {{< /note >}} + +1. If you are logged in to NGINX Controller using a web browser, sign out and log in again. + + - To sign out, select your username in the upper right-hand corner, and then select "Sign Out". For optimal performance, also flush your browser cache. + +{{< important >}} After you upgrade NGINX Controller, you also need to [update the NGINX Controller Agent]({{< relref "/controller/admin-guides/install/install-nginx-controller-agent" >}}) to the latest version. {{< /important >}} + +  + +--- + +## Uninstall NGINX Controller + +To uninstall NGINX Controller, run the uninstall script: + +```bash +/opt/nginx-controller/uninstall.sh +``` + +  + +--- + +## Install NGINX Controller Agent +{{< see-also >}} If you want to run the NGINX Controller Agent as a non-root user, follow the alternative instructions in the [Install NGINX Controller Agent for Non-root User]({{< relref "/controller/admin-guides/install/install-agent-non-root.md" >}}) guide instead of the steps provided in this section. {{< /see-also >}} + +Install the Controller Agent on each NGINX Plus instance that you want to manage and monitor. + +Take the following steps to add an instance to NGINX Controller: + +1. Open the NGINX Controller user interface and log in. +2. Select the NGINX Controller menu icon, then select **Infrastructure**. +3. 
On the **Infrastructure** menu, select **Instances** > **Overview**. +4. On the **Instances** overview page, select **Create**. +5. On the **Create Instance** page, select **Add an existing instance**. +6. Add a name for the instance. If you don't provide a name, the hostname of the instance is used by default. +7. To add the instance to an existing [Instance Group]({{< relref "/controller/infrastructure/instances/manage-instances.md#instance-groups" >}}), select an Instance Group from the list. Or to create an Instance Group, select **Create New**. +8. To add the instance to an existing Location, select a Location from the list. Or to create a Location, select **Create New**. + + {{< important >}} +Once set, the Location for an instance cannot be changed. If you need to change or remove the Location for an instance, you must [remove the instance from NGINX Controller]({{< relref "/controller/infrastructure/instances/manage-instances.md#delete-an-instance" >}}), and then add it back. + {{< /important >}} + + {{< important >}} +Instances and the instance groups they belong to should specify the same location; however, this requirement is not currently enforced. If different locations are specified, the instance group's location takes precedence. This is important to remember when [assigning locations to workload groups]({{< relref "/controller/app-delivery/manage-apps.md#workload-groups">}}). + {{< /important >}} + +9. (Optional) By default, registration of NGINX Plus instances is performed over a secure connection. To use self-signed certificates with the Controller Agent, select **Allow insecure server connections to NGINX Controller using TLS**. For security purposes, we recommend that you secure the Controller Agent with signed certificates when possible. +10. Use SSH to connect and log in to the NGINX instance that you want to connect to NGINX Controller. +11. 
Run the `curl` or `wget` command that's shown in the **Installation Instructions** section on the NGINX instance to download and install the Controller Agent package. When specified, the `-i` and `-l` options for the `install.sh` script refer to the instance name and Location, respectively. + + {{< note >}} + +Make sure you enter the commands to download and run the `install.sh` script on the NGINX Plus system, and not on the NGINX Controller. + +NGINX Controller 3.6 and earlier require Python 2.6 or 2.7. You'll be prompted to install Python if it's not installed already. Python is not required for NGINX Controller v3.7 and later. + + {{< /note >}} + +After a few minutes, the NGINX instance will appear on the **Instances** overview page. + + +  + +--- + +## Troubleshooting + +If NGINX Controller isn't working how you expect, see the knowledge base article [K03263142](https://support.f5.com/csp/article/K03263142) for installation troubleshooting procedures. + +### Create a Support Package + +You can create a support package for NGINX Controller that you can use to diagnose issues. + +{{< note >}} +You will need to provide a support package if you open a ticket with NGINX Support via the [MyF5 Customer Portal](https://account.f5.com/myf5). +{{< /note >}}  + +```bash +/opt/nginx-controller/helper.sh supportpkg [-o|--output ] [-s|--skip-db-dump] [-t|--timeseries-dump ] +``` + + + +| Options | Description | +|----------|-------------| +| `-o` \| `--output` | Save the support package file to ``. | +| `-s` \| `--skip-db-dump` | Don't include the database dump in the support package. | +| `-t` \| `--timeseries-dump ` | Include the last `` of timeseries data in the support package (default 12 hours). | + +Take the following steps to create a support package: + +1. Open a secure shell (SSH) connection to the NGINX Controller host and log in as an administrator. + +1. 
Run the `helper.sh` utility with the `supportpkg` option:
+
+   ```bash
+   /opt/nginx-controller/helper.sh supportpkg
+   ```
+
+   The support package is saved to:
+
+   `/var/tmp/supportpkg-<timestamp>.tar.gz`
+
+   For example:
+
+   `/var/tmp/supportpkg-20200127T063000PST.tar.gz`
+
+1. Run the following command on the machine where you want to download the support package to:
+
+   ``` bash
+   scp <username>@<controller-host>:/var/tmp/supportpkg-<timestamp>.tar.gz /local/path
+   ```
+
+&nbsp;
+
+#### Support Package Details
+
+The support package is a tarball that includes NGINX Controller configuration information, logs, and system command output. Sensitive information, including certificate keys, is not included in the support package.
+
+The support package gathers information from the following locations:
+
+```md
+.
+├── database
+│   ├── common.dump - full dump of the common database
+│   ├── common.dump_stderr - any errors when dumping the database
+│   ├── common-apimgmt-api-client-api-keys.txt - contents of apimgmt_api_client_api_keys table from the common database
+│   ├── common-apimgmt-api-client-groups.txt - contents of apimgmt_api_client_groups table from the common database
+│   ├── common-email-verification.txt - contents of email_verification table from the common database
+│   ├── common-oauth-clients.txt - contents of oauth_clients table from the common database
+│   ├── common-settings-license.txt - contents of settings_license table from the common database
+│   ├── common-settings-nginx-plus.txt - contents of settings_nginx_plus table from the common database
+│   ├── common-table-size.txt - list of all tables and their size in the common database
+│   ├── data-table-size.txt - list of all tables and their size in the data database
+│   ├── postgres-database-size.txt - size of every database
+│   ├── postgres-long-running-queries.txt - all queries running longer than 10 seconds
+│   ├── system.dump - full dump of the system database
+│   ├── system-account-limits.txt - contents of account_limits table from 
the system database +│   ├── system-accounts.txt - contents of accounts table from the system database +│   ├── system-deleted-accounts.txt - contents of deleted_accounts table from the system database +│   ├── system-deleted-users.txt - contents of deleted_users table from the system database +│   ├── system-users.txt - contents of users table from the system database +│   └── system-table-size.txt - list of all tables and their size in the system database +├── k8s - output of `kubectl cluster-info dump -o yaml` augmented with some extra info +│   ├── apiservices.txt - output of `kubectl get apiservice` +│   ├── kube-system - contents of the kube-system namespace +│   │   ├── coredns-5c98db65d4-6flb9 +│   │   │   ├── desc.txt - pod description +│   │   │   ├── logs.txt - current logs +│   │   │   └── previous-logs.txt - previous logs, if any +│   │   ├── ... +│   │   ├── daemonsets.yaml - list of daemonsets +│   │   ├── deployments.yaml - list of deployments +│   │   ├── events.yaml - all events in this namespace +│   │   ├── namespace.yaml - details of the namespace, including finalizers +│   │   ├── pods.txt - output of `kubectl get pods --show-kind=true -o wide` +│   │   ├── pods.yaml - list of all pods +│   │   ├── replicasets.yaml - list of replicasets +│   │   ├── replication-controllers.yaml - list of replication controllers +│   │   ├── resources.txt - all Kubernetes resources in this namespace +│   │   └── services.yaml - list of services +│   ├── nginx-controller - contents of the nginx-controller namespace +│   │   ├── apigw-8fb64f768-9qwcm +│   │   │   ├── desc.txt - pod description +│   │   │   ├── logs.txt - current logs +│   │   │   └── previous-logs.txt - previous logs, if any +│   │   ├── ... 
+│   │   ├── daemonsets.yaml - list of daemonsets +│   │   ├── deployments.yaml - list of deployments +│   │   ├── events.yaml - all events in this namespace +│   │   ├── namespace.yaml - details of the namespace, including finalizers +│   │   ├── pods.txt - output of `kubectl get pods --show-kind=true -o wide` +│   │   ├── pods.yaml - list of all pods +│   │   ├── replicasets.yaml - list of replicasets +│   │   ├── replication-controllers.yaml - list of replication controllers +│   │   ├── resources.txt - all Kubernetes resources in this namespace +│   │   ├── services.yaml - list of services +│   ├── nodes.txt - output of `kubectl describe nodes` +│   ├── nodes.yaml - list of nodes +│   ├── resources.txt - all non-namespaced Kubernetes resources (including PersistentVolumes) +│   └── version.yaml - Kubernetes version +├── logs - copy of /var/log/nginx-controller/ +│   └── nginx-controller-install.log +├── os +│   ├── cpuinfo.txt - output of `cat /proc/cpuinfo` +│   ├── df-h.txt - output of `df -h` +│   ├── df-i.txt - output of `df -i` +│   ├── docker-container-ps.txt - output of `docker container ps` +│   ├── docker-images.txt - output of `docker images` +│   ├── docker-info.txt - output of `docker info` +│   ├── docker-stats.txt - output of `docker stats --all --no-stream` +│   ├── docker-version.txt - output of `docker version` +│   ├── du-mcs.txt - output of `du -mcs /opt/nginx-controller/* /var/log /var/lib` +│   ├── env.txt - output of `env` +│   ├── firewall-cmd.txt - output of `firewall-cmd --list-all` +│   ├── free.txt - output of `free -m` +│   ├── hostname-all-fqdns.txt - output of `hostname --all-fqdns` +│   ├── hostname-fqdn.txt - output of `hostname --fqdn` +│   ├── hostname.txt - output of `hostname` +│   ├── hostsfile.txt - output of `cat /etc/hosts` +│   ├── ip-address.txt - output of `ip address` +│   ├── ip-neigh.txt - output of `ip neigh` +│   ├── ip-route.txt - output of `ip route` +│   ├── iptables-filter.txt - output of `iptables -L -n -v` 
+│   ├── iptables-mangle.txt - output of `iptables -L -n -v -t mangle` +│   ├── iptables-nat.txt - output of `iptables -L -n -v -t nat` +│   ├── iptables-save.txt - output of `iptables-save` +│   ├── journal-kubelet.txt - output of `journalctl -q -u kubelet --no-pager` +│   ├── lspci.txt - output of `lspci -vvv` +│   ├── netstat-nr.txt - output of `netstat -nr` +│   ├── ps-faux.txt - output of `ps faux` +│   ├── pstree.txt - output of `pstree` +│   ├── ps.txt - output of `ps aux --sort=-%mem` +│   ├── resolvconf.txt - output of `cat /etc/resolv.conf` +│   ├── selinux-mode.txt - output of `getenforce` +│   ├── ss-ltunp.txt - output of `ss -ltunp` +│   ├── swapon.txt - output of `swapon -s` +│   ├── sysctl.txt - output of `sysctl -a --ignore` +│   ├── systemd.txt - output of `journalctl -q --utc` +│   ├── top.txt - output of `top -b -o +%CPU -n 3 -d 1 -w512 -c` +│   ├── uname.txt - output of `uname -a` +│   ├── uptime.txt - output of `cat /proc/uptime` +│   └── vmstat.txt - output of `cat /proc/vmstat` +├── timeseries +│ ├── table-sizes.stat - stat table containing controller table sizes +│ ├── events.csv - events table dump in csv +│ ├── events.sql - events table schema +│ ├── metrics_1day.csv - metrics_1day table dump in csv +│ ├── metrics_1day.sql - metrics_1day table schema +│ ├── metrics_1hour.csv - metrics_1hour table dump in csv +│ ├── metrics_1hour.sql - metrics_1hour table schema +│ ├── metrics_5min.csv - metrics_5min table dump in csv +│ ├── metrics_5min.sql - metrics_5min table schema +│ ├── metrics.csv - metrics table dump in csv +│ ├── metrics.sql - metrics table schema +│ ├── system-asynchronous-metrics.stat - shows info about currently executing events or consuming resources +│ ├── system-events.stat - information about the number of events that have occurred in the system +│ ├── system-metrics.stat - system metrics +│ ├── system-parts.stat - information about parts of a table in the MergeTree family +│ ├── system-settings.stat - information about 
settings that are currently in use +│ └── system-tables.stat - information about all the tables +└── version.txt - Controller version information +``` + + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/nginx-controller-tech-specs.md b/content/controller/admin-guides/install/nginx-controller-tech-specs.md new file mode 100644 index 000000000..41d318a24 --- /dev/null +++ b/content/controller/admin-guides/install/nginx-controller-tech-specs.md @@ -0,0 +1,427 @@ +--- +description: Guidelines and recommendations for configuring F5 NGINX Controller. +docs: DOCS-256 +doctypes: +- reference +tags: +- docs +title: NGINX Controller Tech Specs +toc: true +weight: 100 +--- + +## Overview + +This guide lists the technical recommendations for F5 NGINX Controller v3 and NGINX Controller Agent. Review this guide before installing or updating NGINX Controller or NGINX Controller Agent. + +## Supported Distributions + +NGINX Controller, the NGINX Controller Agent, and the NGINX Controller Application Security Add-on support the following distributions and architectures. + +{{< see-also >}}Refer to the [NGINX Plus Technical Specifications](https://docs.nginx.com/nginx/technical-specs/) guide for the distributions that NGINX Plus supports.{{< /see-also >}} + +{{< bootstrap-table "table table-striped table-bordered" >}} + +|Distribution
and Version|NGINX Controller
(Control Plane)|Agent
(Data Plane)|ADC App. Sec.
(Data Plane)|APIM Adv. Sec.
(Data Plane)|Notes| +|--- |--- |--- |--- |--- |--- | +|Amazon Linux
2
(x86_64)| Not supported|v3.0+ |Not supported|Not supported| | +|Amazon Linux
2017.09+
(x86_64)| Not supported |v3.0+|Not supported |Not supported| | +|CentOS
6.5+
(x86_64)| Not supported |v3.0+| Not supported |Not supported| • CentOS 6.5 and later versions in the CentOS 6 family are partially supported.
• This distribution does not support AVRD.| +|CentOS
7.4+
(x86_64)|v3.0+|v3.0+ | v3.12+ |v3.19+| • CentOS 7.4 and later versions in the CentOS 7 family are supported.| +|Debian
8
(x86_64)| Not supported |v3.0–3.21|Not supported|Not supported|• This distribution does not support AVRD.| +|Debian
9
(x86_64)|v3.0+|v3.0–3.21 | v3.12+ |v3.19+ | | +|Debian
10
(x86_64)| Not supported |v3.17+ | v3.17+ |v3.19+| See the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/) for requirements for Debian 10. | +|Red Hat Enterprise Linux
6.5+| Not supported |v3.0+| Not supported | Not supported| • RHEL 6.5 and later versions in the RHEL 6 family are partially supported.| +|Red Hat Enterprise Linux
7.4+
(x86_64)|v3.5+|v3.5+ | v3.12+|v3.19+| • RHEL 7.4 and later versions in the RHEL 7 family are supported.
• SELinux may interfere with NGINX Controller installation and operation. If you do enable SELinux, it must use permissive mode. Use of enforcing mode is not supported. | +|Red Hat Enterprise Linux
8.0+
(x86_64)|v3.22+|v3.22+ | v3.22+| Not supported | • RHEL 8.0 and later versions in the RHEL 8 family are supported.
• SELinux may interfere with NGINX Controller installation and operation. If you do enable SELinux, it must use permissive mode. Use of enforcing mode is not supported. | +|Ubuntu
18.04 LTS
(x86_64)|v3.0+|v3.0+ |v3.13+|v3.19+| | +|Ubuntu
20.04 LTS
(x86_64)|v3.20+|v3.12+|v3.16.1+|v3.19+| | + +{{< /bootstrap-table >}} + + + + +#### Analytics, Visibility, and Reporting Daemon (AVRD) + +NGINX Controller v3.1 and later use an Analytics, Visibility, and Reporting daemon (AVRD) to aggregate and report app-centric metrics, which you can use to track and check the health of your apps. To learn more about these metrics, see the [NGINX Metrics Catalog]({{< relref "/controller/analytics/catalogs/metrics.md" >}}) topic. + + +  + +--- + +## Storage Requirements + +The following table shows the minimum storage requirements we recommend for NGINX Controller. Your final storage requirements may differ depending on your environment, configuration, and the number of instances, apps, and APIs you're managing. Production deployments, for example, will require more storage than trial deployments. Contact your NGINX Controller sales associate if you have questions about sizing for your particular environment. + +We recommend using a local volume for the analytics and config databases for trial deployments, for simplicity's sake so you can get started using NGINX Controller right away. For production environments, we recommend using an external volume for the databases for resiliency. + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Resource | Path(s) | Minimum Storage | +|-|-|-| +| NGINX Controller | /opt/nginx-controller | 80 GB | +| Analytics database | /opt/nginx-controller/clickhouse_data | • 50 GB
• 150 GB if App Security is enabled | +| Config database | /opt/nginx-controller/postgres_data | 10 GB | +| Logs | • /var/log/nginx-controller
• /var/log/journal
• /var/log/pods
• /var/lib/docker/containers
• /var/lib/kubelet
• /var/lib/kubernetes| 15 GB cumulative | + +{{< /bootstrap-table >}} + + +  + +--- + +## Supported Deployment Environments + +You can deploy NGINX Controller v3 into the following environments: + +- Bare metal +- Public cloud: Amazon Web Services, Google Cloud Platform, Microsoft Azure +- Virtual Machine + +  + +--- + +## NGINX Plus Instances + +NGINX Controller, using the Controller Agent, can monitor and manage up to 100 [NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) instances. When using Controller App Security, NGINX Controller can monitor and manage up to 30 NGINX Plus instances with NGINX App Protect installed. + + +NGINX Controller supports the following [NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) versions: + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| NGINX Plus | NGINX Controller | NGINX Controller ADC | NGINX Controller APIM | +|------------|------------------|----------------------|-----------------------| +| R30 | Not supported | 3.22.9+ | Not supported | +| R29 | Not supported | 3.22.9+ | 3.19.6+ | +| R28 | Not supported | 3.22.6+ | 3.19.6+ | +| R27 | Not supported | 3.22.4+ | 3.19.6+ | +| R26 | Not supported | 3.22.2+ | 3.19.6+ | +| R25 | Not supported | 3.20.1+ | 3.19.2+ | +| R24 | 3.17+ | 3.20+ | 3.18+ | +| R23 | 3.12+ | 3.20.0 - 3.22.2 | 3.18+ | +| R22 | 3.5+ | 3.20.0 - 3.22.1 | 3.18+ | +| R21 | 3.5 - 3.12 | Not supported | Not supported | +| R20 | 3.0 - 3.12 | Not supported | Not supported | +| R19 | 2.6 - 3.5 | Not supported | Not supported | + +{{< /bootstrap-table >}} + + +  + +--- + +## NGINX App Protect Compatibility Matrix + +The App Security add-on for the NGINX Controller Application Delivery module is compatible with the versions of NGINX Plus and NGINX App Protect shown in the table below. New releases of NGINX Controller ADC support the last four versions of NGINX Plus at release time. 
+ +{{< see-also >}} +Refer to [Using NGINX App Protect with NGINX Controller]({{< relref "controller/admin-guides/install/install-for-controller.md" >}}) for installation instructions and additional information. +{{< /see-also >}} + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| NGINX Controller version | NGINX App Protect version(s) | NGINX Plus version(s) | +|-------------------------------------|-------------------------------------------------------------------------------------------------|--------------------------------| +| NGINX Controller ADC v3.22.9 | v4.5
v4.3, v4.4
v4.0, v4.1, v4.2
v3.12, v3.11 | R30
R29
R28
R27 | +| NGINX Controller ADC v3.22.8 | v4.0, v4.1
v3.12, v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6 | R28
R27
R26
R25 | +| NGINX Controller ADC v3.22.7 | v4.0, v4.1
v3.12, v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6 | R28
R27
R26
R25 | +| NGINX Controller ADC v3.22.6 | v4.0, v4.1
v3.12, v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6 | R28
R27
R26
R25 | +| NGINX Controller ADC v3.22.5 | v3.12, v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2 | R27
R26
R25
R24 | +| NGINX Controller ADC v3.22.4 | v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2 | R27
R26
R25
R24 | +| NGINX Controller ADC v3.22.3 | v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3 | R26
R25
R24
R23 | +| NGINX Controller ADC v3.22.2 | v3.9.1, v3.9.0
v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3 | R26
R25
R24
R23 | +| NGINX Controller ADC v3.22, v3.22.1 | v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R25
R24
R23
R22 | +| NGINX Controller ADC v3.21 | v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R25
R24
R23
R22 | +| NGINX Controller ADC v3.20.1 | v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R25
R24
R23
R22 | +| NGINX Controller ADC v3.20 | v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R24
R23
R22 | +| NGINX Controller APIM v3.19.2 | v3.6
v3.5, v3.4 | R25
R24 | +| NGINX Controller APIM v3.19 | v3.5, v3.4 | R24 | +| NGINX Controller v3.18 | v3.5, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R24
R23
R22 | +| NGINX Controller v3.17 | v3.2
v3.1, v3.0, v2.3
v2.1.1 | R24
R23
R22 | +| NGINX Controller v3.16 | v3.1, v3.0, v2.3
v2.1.1 | R23
R22 | +| NGINX Controller v3.14, v3.15 | v3.0, v2.3
v2.1.1 | R23
R22 | +| NGINX Controller v3.13 | v2.3
v2.1.1 | R23
R22 | +| NGINX Controller v3.12 | v2.1.1 | R22 | + +{{< /bootstrap-table >}} + +--- + +## Supported Browsers + +NGINX Controller works best with the newest and the last prior version of these browsers with JavaScript, cookies, and SSL enabled: + +- [Google Chrome](https://www.google.com/chrome/) +- [Firefox](https://www.mozilla.org/en-US/firefox/new/) +- [Safari](https://support.apple.com/downloads/safari) +- [Internet Explorer](https://support.microsoft.com/en-us/help/17621/internet-explorer-downloads) and [Microsoft Edge](https://www.microsoft.com/en-us/edge) + +{{< important >}} +You may need to turn off any ad blockers while using the NGINX Controller user interface. + +In some cases, the NGINX Controller user interface may not display analytics or security events if an ad blocker is enabled. Refer to the AskF5 KB article [K48603454](https://support.f5.com/csp/article/K48903454) to learn more about this issue and how to resolve it. +{{< /important >}} + + +  + +--- + +## Hardware Specifications + +The following minimum hardware specifications are required for each node running NGINX Controller: + +- RAM: 8 GB RAM +- CPU: 8-Core CPU @ 2.40 GHz or similar +- Disk space: 155–255 GB free disk space. 255 GB of free space is recommended if NGINX Controller App Security is enabled. See the [Storage Requirements]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md#storage-requirements" >}}) section for a categorized list of the storage requirements. + +The NGINX Controller Agent consumes as little memory and CPU as possible. CPU usage should be under 10%, and RSS memory consumption should be just a few dozen MBs. If you notice the NGINX Controller Agent consuming resources at a higher rate, you should [contact NGINX Support]({{< relref "/controller/support/contact-support.md" >}}) for assistance. 
+ +  + +--- + +## NGINX Controller Database Requirements + +When installing NGINX Controller, you can choose the type of volume to use for the analytics and config databases. The types of volumes that are supported are: + +- [Local Storage](#local-storage) +- [NFS](#nfs) +- [AWS EBS](#aws-ebs) + +We recommend using a local volume for the analytics and config databases for trial deployments, for simplicity's sake so you can get started using NGINX Controller right away. For production environments, we recommend using an external volume for the databases for resiliency. + +  + +### Local Storage + +When using local storage for the analytics and/or config database, we recommend the following specs: + +- 100 IOPS +- 155–255 GB free disk space. 255 GB of free space is recommended if NGINX Controller App Security is enabled. See the [Storage Requirements]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md#storage-requirements" >}}) section for a categorized list of the storage requirements. + +{{< tip >}} +To conserve IO and/or disk space, you can use a separate disk for the local storage directory `/opt/nginx-controller/clickhouse_data`. +{{< /tip >}} + +  + +### NFS + +To use NFS for external storage for the analytics and/or config database, consider the following: + +- Make certain that the NFS version used by the server is supported by the client system where you're installing NGINX Controller. +- If you're using NFS v4 file locking or Network Lock Manager (NLM) on the NFS server, make sure that the client system that's running your NGINX Controller has access to the mount point. +- Install the `nfs-common` (on Ubuntu/Debian) or `nfs-utils` (on CentOS/RedHat) package on all hosts on which NGINX Controller will be installed. +- The `no_root_squash` option must be set for the mount point on the NFS server. 
If this is not allowed, the owner of the path used for the analytics database must be set to `101:101` and owner of the path for config database must be set to `70:70`. +- The config database should support a throughput of 2 MiB/s or greater. + +&nbsp; + +### AWS EBS + +{{< important >}} +If you plan to run NGINX Controller on AWS EC2 instances, we recommend using NFS shares for the external volumes. Using EBS shares for multi-node clusters is not recommended because of the [EBS Availability Zone limitations](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volumes-multi.html#considerations); for example, the requirement to have EC2 instances and EBS volumes in the same Availability Zone. +{{< /important >}} + +If you are installing NGINX Controller on [AWS EC2 instances](https://aws.amazon.com/ec2/getting-started/) and plan to use EBS volumes for the analytics and/or config database, consider the following: + +You will need to add an IAM role like that shown below. + +- IAM Role for [Single-Node Installation]({{< relref "/controller/admin-guides/install/install-nginx-controller.md" >}}) + + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeVpcs", + "iam:CreateServiceLinkedRole", + "kms:DescribeKey" + ], + "Resource": [ + "*" + ] + } + ] + } + ``` + +- IAM Role for [Multi-Node Installation]({{< 
relref "/controller/admin-guides/install/resilient-cluster-aws.md" >}}) + + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeVpcs", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + 
"elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "iam:CreateServiceLinkedRole", + "kms:DescribeKey" + ], + "Resource": [ + "*" + ] + } + ] + } + ``` + +  + +--- + +## Supported PostgreSQL Versions + +NGINX Controller supports the following versions of PostgreSQL: + +- PostgreSQL 12.x -- works with NGINX Controller 3.9 and later. +- PostgreSQL 9.5 -- works with NGINX Controller 3.0 and later. + +For a system monitoring **100 NGINX Plus instances**, we recommend at least **32 GB of database storage**. Database storage requirements can vary, depending on the number of NGINX Plus instances, components, published API specs, and the churn rate for configuration changes. For monitor-only implementations, the database storage needs are small; for API Management (APIM) and/or App Delivery Controller (ADC) implementations in production, the storage needs are greater. + +{{< important >}} +If you use PostgreSQL 12, we recommend disabling [Just-in-Time (JIT)](https://www.postgresql.org/docs/12/jit.html) compilation to improve NGINX Controller's performance. To disable JIT, edit the `postgresql.conf` file and set `jit=off`. +{{< /important >}} + + +  + +--- + +## Firewall/IP Settings + +Configure NGINX Controller with the following firewall settings: + +{{< bootstrap-table "table table-striped table-bordered" >}} + +|Port| Used by | Used for| +|---|---|---| +| 5432 TCP | NGINX Controller database | Incoming connections to the NGINX Controller database from the NGINX Controller host. This is the default PostgreSQL port. | +| 443 TCP | • NGINX Controller
• NGINX Controller licensing | • Incoming connections to NGINX Controller from a browser; for example, from an internal network and NGINX Plus instances
• Incoming and outgoing connections used to validate the entitlements for your NGINX Controller license | +| 8443 TCP | NGINX Controller | Incoming connections from NGINX Plus instances
You need to **open** port 8443 TCP if you're running **NGINX Controller v3.18.2 or earlier**| +| 8883 TCP | NGINX Controller licensing | Incoming and outgoing connections used to validate the entitlements for your NGINX Controller license
Port 8883 TCP needs to be **opened** only if you're running **NGINX Controller v3.15 or earlier**| + +{{< /bootstrap-table >}} + +If you have a firewall running on the NGINX Controller host, enable NAT (masquerade) and open the following ports. These ports are used for **internal traffic** only and don't need to be open to the outside: + +{{< bootstrap-table "table table-striped table-bordered" >}} + +|Port| Used by | Used for| +|---|---|---| +|2379 TCP
2380 TCP
6443 TCP|NGINX Controller|Incoming requests to the Kubernetes control plane; used for the Kubernetes API server and etcd| +|10250 TCP|NGINX Controller|Incoming requests to the Kubernetes worker node; used for the Kubelet API| +|10251 TCP|NGINX Controller|Incoming requests to the Kubernetes kube-scheduler; used for the pod scheduling| +|10252 TCP|NGINX Controller|Incoming requests to the Kubernetes kube-controller-manager; used for regulating the state of the system| +|8472 UDP|NGINX Controller|Used for pod-to-pod communication in multi-node resilient clusters| + +{{< /bootstrap-table >}} + +For more information about these ports, see the Kubernetes guide [Installing kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#check-required-ports). + + +  + +--- + +## Supported Python Versions + +NGINX Controller and the NGINX Controller Agent versions 3.6 and earlier require Python 2.6 or 2.7. Python is not needed for NGINX Controller or the NGINX Controller Agent versions 3.7 and later. + +  + +--- + +## Open-Source Licenses + +The list of open-source packages and their licenses used by NGINX Controller can be found in the downloaded file that is part of the NGINX Controller package. On your NGINX Controller host, see `controller-installer/files/license-controller.md`. + +In addition, see the AskF5 KB article [Third-party software for NGINX Controller controller-datacollection-components](https://support.f5.com/csp/article/K30028643) for third-party software packages that may be used by or distributed with controller-datacollection-components. This information is not included in the `license-controller.md` that's mentioned above. 
+ +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/resilient-cluster-aws.md b/content/controller/admin-guides/install/resilient-cluster-aws.md new file mode 100644 index 000000000..9efe83a77 --- /dev/null +++ b/content/controller/admin-guides/install/resilient-cluster-aws.md @@ -0,0 +1,334 @@ +--- +description: This guide explains how to deploy F5 NGINX Controller as a multi-node resilient + cluster on AWS. +docs: DOCS-257 +doctypes: +- tutorial +tags: +- docs +title: Deploy NGINX Controller as a Resilient Cluster on AWS +toc: true +weight: 310 +--- + +## Overview + +Complete the steps in this guide to deploy F5 NGINX Controller as a resilient, three-node cluster on AWS. A multi-node cluster ensures that NGINX Controller stays up even if one of the control-plane hosts becomes unavailable. + +### Failure Tolerance + +To be resilient, a cluster requires three working nodes. That's two nodes for a quorum and one node for failure tolerance. + +If a node fails in a resilient cluster, NGINX Controller automatically redirects traffic to the other working nodes. A multi-node cluster is operational with only two nodes; however, a two-node cluster isn't resilient to further failures. If one of the nodes in a multi-node cluster becomes degraded or fails, you must take action **as soon as possible** to recover or replace the failed node or risk losing resiliency. + +{{< important >}}The failover time can take **up to 5 minutes** when a node fails. During this time, NGINX Controller may be unavailable while services are migrated and restarted. Resiliency will be restored once there are **three working nodes** in the cluster. 
+{{< /important >}} + +The following table shows how many nodes are needed for a cluster to have a quorum and what the failure tolerance is: + + + +| Cluster Size | Quorum | Failure Tolerance | +|--------------|--------|-------------------| +| 1 | 1 | 0 | +| 2 | 2 | 0 | +| 3 | 2 | 1 | + +Larger clusters aren't supported. + +  + +--- + +## Before You Begin + +### Implementation Considerations + +Before installing or configuring NGINX Controller as a multi-node cluster, review the following list of considerations to assist with planning: + +- Configuring NGINX Controller as a multi-node cluster on AWS requires **NGINX Controller 3.14 or later**. To upgrade from an earlier version, refer to the [Update NGINX Controller]({{< relref "/controller/admin-guides/install/install-nginx-controller.md#update-nginx-controller" >}}) steps for instructions. +- Data migration is not supported, so it's not possible to implement a multi-node cluster with local volumes without reinstalling NGINX Controller. +- If you plan to run NGINX Controller on AWS EC2 instances, we recommend using NFS shares for the external volumes. Using EBS shares for multi-node clusters is not recommended because of the [EBS Availability Zone limitations](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volumes-multi.html#considerations); for example, the requirement to have EC2 instances and EBS volumes in the same Availability Zone. +- Cluster config changes are orchestrated by a primary control plane node that writes to the external config database. Each NGINX Controller control plane node hosts a set of services (pods) that read and write data. Only the node that hosts the pod that manages the config data writes to the external config database. + + + + +  + +--- + +### Prerequisites + +{{< important >}}If you plan to run NGINX Controller on AWS EC2 instances, we recommend you use NFS shares for the external volumes. 
Using EBS shares for multi-node clusters is not recommended because of the [EBS Availability Zone limitations](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volumes-multi.html#considerations).{{< /important >}} + +Things you'll need before installing NGINX Controller as a resilient cluster: + +- Three hosts on which you can install NGINX Controller to create a cluster +- The `controller-installer-.tar.gz` package, which you can get from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). You need to upload and extract this tarball **on each host**. +- A license file for NGINX Controller +- A tool to send API requests, such as Postman or curl +- An external volume for the config database + + When installing NGINX Controller, you can choose to have NGINX Controller install and manage a self-hosted -- also known as "embedded" -- [PostgreSQL](https://www.postgresql.org/) database for you; this is the recommended implementation. Alternatively, you can [install your own PostgreSQL database for the config database]({{< relref "/controller/admin-guides/install/install-nginx-controller.md#postgresql-optional" >}}), which you manage; this is sometimes referred to as an "external config database" because it is externally managed by you. Regardless of whether you use an embedded or an externally managed config database, the config database must be on an external volume for resilient clusters. + +- An external volume for the analytics database + +  + +--- + +## Configure IAM Roles + +{{< important >}}If you plan to run NGINX Controller on AWS EC2 instances, we recommend using NFS shares for the external volumes. 
Using EBS shares for multi-node clusters is not recommended because of the [EBS Availability Zone limitations](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volumes-multi.html#considerations); for example, the requirement to have EC2 instances and EBS volumes in the same Availability Zone.{{< /important >}} + +If you are installing NGINX Controller on [AWS EC2 instances](https://aws.amazon.com/ec2/getting-started/) and plan to use EBS volumes for the analytics and/or config database, you will need to add an IAM role like the one shown below. This will also allow the automatic creation of Elastic Load Balancers (ELBs). Additionally, for successful automatic creation of ELBs, all the EC2 instances that are or will be part of the cluster must be tagged with the following key-value pair: + `kubernetes.io/cluster/NGINX-CONTROLLER : owned` + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeVpcs", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteLoadBalancer", + 
"elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "iam:CreateServiceLinkedRole", + "kms:DescribeKey" + ], + "Resource": [ + "*" + ] + } + ] +} +``` + +  + +--- + +## Install NGINX Controller + +- Complete the steps in the [NGINX Controller Installation Guide]({{< relref "/controller/admin-guides/install/install-nginx-controller.md" >}}) to install NGINX Controller on the first node. + +  + +--- + +## License NGINX Controller + +- Follow the steps to [license NGINX Controller]({{< relref "/controller/platform/licensing-controller.md" >}}). + +  + +--- + +## Add Nodes to the Cluster + +Nodes are additional control-plane hosts that you can add to your cluster to improve uptime resilience. For a resilient cluster, you should have at least three nodes, of which **two nodes must always be operational**. 
+ +{{< important >}} +When adding a third node to the cluster for the first time, NGINX Controller may become momentarily unavailable while the cluster is being created. For this reason, we recommend updating NGINX Controller during a planned maintenance window to minimize disruptions. +{{< /important >}} + +Take the following steps to add a node to the cluster: + +1. Open the NGINX Controller web interface and log in. +1. Select the NGINX Controller menu icon, then select **Platform**. +1. On the **Platform** menu, select **Cluster**. +1. On the **Cluster** overview page, select **Create Node**. +1. Add a name for the node. +1. (Optional) Add a description. +1. Add the hostname or IP address -- or both -- for the node. +1. Select **Save**. The new node appears in the list of nodes on the **Cluster** overview page with a `Configuring` status. +1. Choose the new node's name in the list, then select **View** (eye icon). A page displays with command-line instructions for adding the node. +1. Copy the `install.sh` command and join-key that are shown. +1. Open an SSH connection to the node that you're adding to the cluster. +1. (Optional) If you're adding a node that was previously deleted, uninstall NGINX Controller from the node if you haven't already, and then continue with the remaining steps in this procedure: + + ```bash + /opt/nginx-controller/uninstall.sh + ``` + +1. Upload and extract the `controller-installer-.tar.gz` tarball. +1. Run the `install.sh` command with the join-key that you copied in the previous step. If you get an error that the join-key has expired, you can get a new one by following the steps in this topic to add a node using the web interface or the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}). + + ```bash + cd controller-installer + ./install.sh --join-key + ``` + +1. After the installation finishes, the node status in the web interface changes to `Configured`. +1. 
Repeat these steps for each node that you want to add to the cluster. + +{{< see-also >}} +To add nodes to your cluster using the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}), send a POST request to the `/platform/nodes` endpoint. +{{< /see-also >}} + +  + +--- + +## Add Load Balancer Alias to FQDN + +You must add the hostname or IP address for the load balancer as a CNAME or A record for the domain that's used as the Fully Qualified Domain Name (FQDN) for NGINX Controller. + +To get the hostname or IP address for the load balancer using the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}), send a GET request to the `/platform/global` endpoint. + +  + +--- + +## Delete a Node + +There might be situations when you need to delete a node, either temporarily for maintenance or permanently to decommission a node. + +If you need to remove a node temporarily, follow the steps in the [Add Nodes to the Cluster](#add-nodes-to-the-cluster) topic when you are ready to re-add it. Make sure to uninstall NGINX Controller from the node before re-installing NGINX Controller with the new join-key. + +{{< important >}} +Deleting nodes can cause NGINX Controller to become momentarily unavailable while the cluster is being updated. For this reason, we recommend updating NGINX Controller during a planned maintenance window to minimize disruptions. When deleting nodes, make sure that **at least two nodes are always operational**. If the cluster has fewer than two working nodes, NGINX Controller may become unresponsive, and you may not be able to add new nodes. +{{< /important >}} + +{{< see-also >}} +To delete nodes from your cluster using the [NGINX Controller API Reference]({{< relref "/controller/api/_index.md" >}}), send a DELETE request to the Nodes endpoint. +{{< /see-also >}} + +To delete a node from the cluster using the web interface: + +1. Open the NGINX Controller web interface and log in. +1. 
Select the NGINX Controller menu icon, then select **Platform**. +1. On the **Platform** menu, select **Cluster**. +1. On the **Cluster** overview page, choose the node you want to delete, then select **Delete** (trash icon). +1. Select **Delete** to confirm. +1. To finish deleting a node from the cluster, uninstall NGINX Controller from the node: + + 1. SSH into the node that you're deleting from the cluster. + 1. Run the NGINX Controller uninstall script: + + ```bash + /opt/nginx-controller/uninstall.sh + ``` + +{{< see-also >}} +To delete nodes from your cluster using the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}), send a DELETE request to the `/platform/nodes` endpoint. +{{< /see-also >}} + +&nbsp; + +--- + +## Replace a Failed Node + +To be resilient, a cluster requires three working nodes. That's two nodes for a quorum and one node for failure tolerance. + +If one of the nodes in a multi-node cluster becomes degraded or fails, you must take action **as soon as possible** to recover or replace the failed node or risk losing resiliency. + +To replace a failed node: + +1. [Delete the failed node](#delete-a-node). +1. [Add a new node](#add-nodes-to-the-cluster). + +&nbsp; + +--- + +## Updating a Cluster + +When updating NGINX Controller on a multi-node cluster, run the `update.sh` script on each node individually -- the order in which you update the nodes doesn't matter. + +{{< warning >}}Do not update the nodes in a multi-node cluster in parallel. Doing so may result in race conditions for certain jobs, such as database migrations, and may cause the cluster to become unavailable.{{< /warning >}} + +{{< important >}} +Active users will be logged out from NGINX Controller during an update. We recommend updating NGINX Controller during a planned maintenance window to minimize disruptions. +{{< /important >}} + +To update your cluster to a newer version of NGINX Controller, take the following steps: + +1. 
Before updating the cluster, [check each node's status]({{< relref "/controller/platform/manage-cluster.md#view-node-status" >}}) to confirm the nodes are healthy. Resolve any degradations before updating. +1. Download the new installer package from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). + +1. Extract the installer package and save the contents to each node: + + ```bash + tar xzf controller-installer-.tar.gz + ``` + +1. Run the update script on each node -- the order in which you update the nodes doesn't matter: + + ```bash + cd controller-installer + ./update.sh + ``` + +  + +{{< versions "3.14" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/resilient-cluster-private-cloud.md b/content/controller/admin-guides/install/resilient-cluster-private-cloud.md new file mode 100644 index 000000000..cc9dbedf1 --- /dev/null +++ b/content/controller/admin-guides/install/resilient-cluster-private-cloud.md @@ -0,0 +1,367 @@ +--- +description: This guide explains how to deploy F5 NGINX Controller as a multi-node resilient + cluster on a private cloud. +docs: DOCS-258 +doctypes: +- tutorial +tags: +- docs +title: Deploy NGINX Controller as a Resilient Cluster on a Private Cloud +toc: true +weight: 300 +--- + +## Overview + +Complete the steps in this guide to deploy F5 NGINX Controller as a resilient, three-node cluster on your private cloud. A multi-node cluster ensures that NGINX Controller stays up even if one of the control-plane hosts becomes unavailable. + +The diagram below shows how the different objects in a multi-node NGINX Controller cluster relate to each other. The control nodes communicate with an embedded, self-hosted database that is stored on an external volume. 
The NGINX Controller Agent -- and NGINX Controller users -- can access the cluster via a load balancer or floating IP address that is associated with NGINX Controller's FQDN. If a node in the cluster becomes unavailable for any reason, traffic is re-routed automatically to an available node. + +{{< img src="/ctlr/img/multi-node-diagram.png" alt="Diagram showing the relationship of objects in a multi-node cluster." width="639" height="689" >}} + + +### Failure Tolerance + +To be resilient, a cluster requires three working nodes. That's two nodes for a quorum and one node for failure tolerance. + +If a node fails in a resilient cluster, NGINX Controller automatically redirects traffic to the other working nodes. A multi-node cluster is operational with only two nodes; however, a two-node cluster isn't resilient to further failures. If one of the nodes in a multi-node cluster becomes degraded or fails, you must take action **as soon as possible** to recover or replace the failed node or risk losing resiliency. + +{{< important >}}The failover time can take **up to 5 minutes** when a node fails. During this time, NGINX Controller may be unavailable while services are migrated and restarted. Resiliency will be restored once there are **three working nodes** in the cluster. +{{< /important >}} + +The following table shows how many nodes are needed for a cluster to have a quorum and what the failure tolerance is: + + + +| Cluster Size | Quorum | Failure Tolerance | +|--------------|--------|-------------------| +| 1 | 1 | 0 | +| 2 | 2 | 0 | +| 3 | 2 | 1 | + +Larger clusters aren't supported. + +  + +--- + +## Before You Begin + +### Implementation Considerations + +Before installing or configuring NGINX Controller as a multi-node cluster, review the following list of considerations to assist with planning: + +- Configuring NGINX Controller as a multi-node cluster on a private cloud requires **NGINX Controller 3.12 or later**. 
To upgrade from an earlier version, refer to the [Update NGINX Controller]({{< relref "/controller/admin-guides/install/install-nginx-controller.md#update-nginx-controller" >}}) steps for instructions. +- Data migration is not supported, so it's not possible to implement a multi-node cluster with local volumes without reinstalling NGINX Controller. +- If you plan to run NGINX Controller on AWS EC2 instances, we recommend using NFS shares for the external volumes. Using EBS shares for multi-node clusters is not recommended because of the [EBS Availability Zone limitations](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volumes-multi.html#considerations); for example, the requirement to have EC2 instances and EBS volumes in the same Availability Zone. +- Cluster config changes are orchestrated by a primary control plane node that writes to the external config database. Each NGINX Controller control plane node hosts a set of services (pods) that read and write data. Only the node that hosts the pod that manages the config data writes to the external config database. + +### Prerequisites + +Things you'll need before installing NGINX Controller as a resilient cluster: + +- Three hosts on which you can install NGINX Controller to create a cluster +- The `controller-installer-.tar.gz` package, which you can get from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). You need to upload and extract this tarball **on each host**. +- A license file for NGINX Controller +- A tool to send API requests, such as Postman or curl +- An external volume for the config database + + When installing NGINX Controller, you can choose to have NGINX Controller install and manage a self-hosted -- also known as "embedded" -- [PostgreSQL](https://www.postgresql.org/) database for you; this is the recommended implementation. 
Alternatively, you can [install your own PostgreSQL database for the config database]({{< relref "/controller/admin-guides/install/install-nginx-controller.md#postgresql-optional" >}}), which you manage; this is sometimes referred to as an "external config database" because it is externally managed by you. Regardless of whether you use an embedded or an externally managed config database, the config database must be on an external volume for resilient clusters. + +- An external volume for the analytics database + +  + +--- + +## Install NGINX Controller + +- Complete the steps in the [NGINX Controller Installation Guide]({{< relref "/controller/admin-guides/install/install-nginx-controller.md" >}}) to install NGINX Controller on the first node. + +  + +--- + +## License NGINX Controller + +- Follow the steps to [license NGINX Controller]({{< relref "/controller/platform/licensing-controller.md" >}}). + +  + +--- + +## Add Nodes to the Cluster + +Nodes are additional control-plane hosts that you can add to your cluster to improve uptime resilience. For a resilient cluster, you should have at least three nodes, of which **two nodes must always be operational**. + +{{< important >}} +When adding a third node to the cluster for the first time, NGINX Controller may become momentarily unavailable while the cluster is being created. For this reason, we recommend updating NGINX Controller during a planned maintenance window to minimize disruptions. +{{< /important >}} + +Take the following steps to add a node to the cluster: + +1. Open the NGINX Controller web interface and log in. +1. Select the NGINX Controller menu icon, then select **Platform**. +1. On the **Platform** menu, select **Cluster**. +1. On the **Cluster** overview page, select **Create Node**. +1. Add a name for the node. +1. (Optional) Add a description. +1. Add the hostname or IP address -- or both -- for the node. +1. Select **Save**. 
The new node appears in the list of nodes on the **Cluster** overview page with a `Configuring` status.
+1. Choose the new node's name in the list, then select **View** (eye icon). A page displays with command-line instructions for adding the node.
+1. Copy the `install.sh` command and join-key that are shown.
+1. Open an SSH connection to the node that you're adding to the cluster.
+1. (Optional) If you're adding a node that was previously deleted, uninstall NGINX Controller from the node if you haven't already, and then continue with the remaining steps in this procedure:
+
+    ```bash
+    /opt/nginx-controller/uninstall.sh
+    ```
+
+1. Upload and extract the `controller-installer-<version>.tar.gz` tarball.
+1. Run the `install.sh` command with the join-key that you copied earlier. If you get an error that the join-key has expired, you can get a new one by following the steps in this topic to add a node using the web interface or the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}).
+
+    ```bash
+    cd controller-installer
+    ./install.sh --join-key <join-key>
+    ```
+
+1. After the installation finishes, the node status in the web interface changes to `Configured`.
+1. Repeat these steps for each node that you want to add to the cluster.
+
+{{< see-also >}}
+To add nodes to your cluster using the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}), send a POST request to the `/platform/nodes` endpoint.
+{{< /see-also >}}
+
+ 
+
+---
+
+## Set the Floating IP
+
+**For private cloud deployments**, you must set a floating IP to complete setting up your multi-node resilient cluster.
+
+A floating IP -- also called a virtual IP -- is a static, routable IPv4 address that improves service resiliency by allowing NGINX Controller to continue to receive traffic if a node becomes unavailable. The floating IP is assigned to one of the cluster nodes, and if the node fails, the floating IP is automatically transferred to another node.
The floating IP should not be in any DHCP pool. + +{{< important>}} +The floating IP needs to be added as an A record for the domain that's used as the Fully Qualified Domain Name (FQDN) for NGINX Controller. + +NGINX Controller **does not support IPv6** addresses for the floating IP. +{{< /important >}} + +Take the following steps to add a floating IP for your private cloud cluster: + +1. Open the NGINX Controller web interface and log in. +1. Select the NGINX Controller menu icon, then select **Platform**. +1. On the **Platform** menu, select **Cluster**. +1. On the **Cluster** overview page, in the **Cluster Configuration** section, select the edit icon (pencil). +1. Select the **Use Floating IP** toggle to turn it on. +1. Add an IP address for the floating IP. +1. Select **Save**. +1. Complete the steps to [update the FQDN](#update-the-fqdn) to use the floating IP. + +{{< see-also >}} +To set a floating IP using the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}), send a PATCH request to the `/platform/global` endpoint. +{{< /see-also >}} +  + +--- + +## Update the FQDN + +The Fully Qualified Domain Name (FQDN) should be a proper domain. The FQDN is used by Controller Agents to access NGINX Controller. It's also used to access the web interface. + +Updating the FQDN for NGINX Controller is a two-step process: + +1. Update the FQDN for NGINX Controller using the web interface or the REST API. +1. Update the Controller Agents to use the new FQDN. + +  + +### Update the FQDN for NGINX Controller + +To change the FQDN for NGINX Controller using the web interface, take the following steps: + +1. Open the NGINX Controller web interface and log in. +1. Select the NGINX Controller menu icon, then select **Platform**. +1. On the **Platform** menu, select **Cluster**. +1. On the Cluster overview page, in the **Cluster Configuration** section, select the edit icon (pencil). +1. In the FQDN box, type the new FQDN that you want to use. 
If you've [set a floating IP](#set-the-floating-ip), use that value for the FQDN.
+1. Select the **Update API Gateway SSL Certificate** toggle.
+1. Select an option for updating the API Gateway cert:
+
+   - **Paste**: Paste the cert and key contents in the respective boxes.
+   - **File**: Browse for and upload the cert and key files.
+
+1. Select **Save**. The cluster services will restart. During this time, the web interface will be briefly unavailable.
+1. Follow the steps to [update the FQDN for Controller Agents](#update-the-fqdn-for-controller-agents).
+
+{{< see-also >}}
+To change the FQDN for NGINX Controller using the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}), send a PATCH request to the `/platform/global` endpoint.
+{{< /see-also >}}
+ 
+
+### Update the FQDN for Controller Agents
+
+To update the FQDN for Controller Agents, take the following steps on each instance where the Controller Agent is installed:
+
+1. Open the `/etc/controller-agent/agent.conf` file for editing.
+1. Update the `api_url` value with the new FQDN:
+
+   ```nginx
+   [cloud]
+   api_url = https://<NGINX-Controller-FQDN>:8443/1.4
+   ```
+
+1. Save the changes.
+1. Restart the Controller Agent:
+
+   ```bash
+   sudo service controller-agent restart
+   ```
+
+
+ 
+
+---
+
+## Update the API Gateway SSL Certificate
+
+Take the following steps to update the API Gateway SSL certificate:
+
+1. Open the NGINX Controller web interface and log in.
+1. Select the NGINX Controller menu icon, then select **Platform**.
+1. On the **Platform** menu, select **Cluster**.
+1. On the **Cluster** overview page, in the **Cluster Configuration** section, select the edit icon (pencil).
+1. Select the **Update API Gateway SSL Certificate** toggle.
+1. Select an option for updating the cert:
+
+   - **Paste**: Paste the cert and key contents in the boxes.
+   - **File**: Browse for and upload the cert and key files.
+
+1. Select **Save**.
+ +{{< see-also >}} +To update the API Gateway SSL certificate and key using the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}), send a PATCH request to the `/platform/global` endpoint. +{{< /see-also >}} + +  + +--- + +## View Node Status + +Take the following steps to view the status for a node: + +{{< see-also >}} +To view a node's status using the [NGINX Controller API Reference]({{< relref "/controller/api/_index.md" >}}), send a GET request to the Nodes endpoint. +{{< /see-also >}} + +1. Open the NGINX Controller web interface and log in. +1. Select the NGINX Controller menu icon, then select **Platform**. +1. On the **Platform** menu, select **Cluster**. +1. On the **Cluster** overview page, choose the node you want to view details for, then select **View** (eye icon). A panel opens and displays the node's current condition, including any errors. If you're adding a node to the cluster, the node status panel shows the command-line instructions to follow to complete setting up the node. + +  + +--- + +## Delete a Node + +There might be situations when you need to delete a node, either temporarily for maintenance or permanently to decommission a node. + +If you need to remove a node temporarily, follow the steps in the [Add Nodes to the Cluster](#add-nodes-to-the-cluster) topic when you are ready to re-add it. Make sure to uninstall NGINX Controller from the node before re-installing NGINX Controller with the new join-key. + +{{< important >}} +Deleting nodes can cause NGINX Controller to become momentarily unavailable while the cluster is being updated. For this reason, we recommend updating NGINX Controller during a planned maintenance window to minimize disruptions. When deleting nodes, make sure that **at least two nodes are always operational**. If the cluster has fewer than two working nodes, NGINX Controller may become unresponsive, and you may not be able to add new nodes. 
+
+{{< /important >}}
+
+{{< see-also >}}
+To delete nodes from your cluster using the [NGINX Controller API Reference]({{< relref "/controller/api/_index.md" >}}), send a DELETE request to the Nodes endpoint.
+{{< /see-also >}}
+
+To delete a node from the cluster using the web interface:
+
+1. Open the NGINX Controller web interface and log in.
+1. Select the NGINX Controller menu icon, then select **Platform**.
+1. On the **Platform** menu, select **Cluster**.
+1. On the **Cluster** overview page, choose the node you want to delete, then select **Delete** (trash icon).
+1. Select **Delete** to confirm.
+1. To finish deleting a node from the cluster, uninstall NGINX Controller from the node:
+
+    1. SSH into the node that you're deleting from the cluster.
+    1. Run the NGINX Controller uninstall script:
+
+        ```bash
+        /opt/nginx-controller/uninstall.sh
+        ```
+
+{{< see-also >}}
+To delete nodes from your cluster using the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}), send a DELETE request to the `/platform/nodes` endpoint.
+{{< /see-also >}}
+
+ 
+
+---
+
+## Replace a Failed Node
+
+To be resilient, a cluster requires three working nodes. That's two nodes for a quorum and one node for failure tolerance.
+
+If one of the nodes in a multi-node cluster becomes degraded or fails, you must take action **as soon as possible** to recover or replace the failed node or risk losing resiliency.
+
+To replace a failed node:
+
+1. [Delete the failed node](#delete-a-node).
+1. [Add a new node](#add-nodes-to-the-cluster).
+
+ 
+
+---
+
+## Updating a Cluster
+
+When updating NGINX Controller on a multi-node cluster, run the `update.sh` script on each node individually -- the order in which you update the nodes doesn't matter.
+
+{{< warning >}}Do not update the nodes in a multi-node cluster in parallel.
Doing so may result in race conditions for certain jobs, such as database migrations, and may cause the cluster to become unavailable.{{< /warning >}} + +{{< important >}} +Active users will be logged out from NGINX Controller during an update. We recommend updating NGINX Controller during a planned maintenance window to minimize disruptions. +{{< /important >}} + +To update your cluster to a newer version of NGINX Controller, take the following steps: + +1. Before updating the cluster, [check each node's status]({{< relref "/controller/platform/manage-cluster.md#view-node-status" >}}) to confirm the nodes are healthy. Resolve any degradations before updating. +1. Download the new installer package from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). + +1. Extract the installer package and save the contents to each node: + + ```bash + tar xzf controller-installer-.tar.gz + ``` + +1. Run the update script on each node -- the order in which you update the nodes doesn't matter: + + ```bash + cd controller-installer + ./update.sh + ``` + +  + +{{< versions "3.12" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/try-nginx-controller-app-sec.md b/content/controller/admin-guides/install/try-nginx-controller-app-sec.md new file mode 100644 index 000000000..c65534569 --- /dev/null +++ b/content/controller/admin-guides/install/try-nginx-controller-app-sec.md @@ -0,0 +1,307 @@ +--- +description: This quick-start tutorial shows you how to get started using F5 NGINX Controller + with the Application Security Add-on. +docs: DOCS-259 +doctypes: +- tutorial +tags: +- docs +title: Trial NGINX Controller with App Security +toc: true +weight: 115 +--- + +## Overview + +This quick-start tutorial shows you how to get started using F5 NGINX Controller with the Application Security Add-on ("App Security"). 
The App Security add-on to the NGINX Controller Application Delivery Module enables a web application firewall (WAF) that you can use to protect your apps. + +Take the steps in this guide to deploy NGINX Controller with App Security and deploy NGINX App Protect with NGINX Plus as a data plane instance for use with NGINX Controller. + +{{< caution >}}In this tutorial, NGINX Controller will install an embedded, self-hosted PostgreSQL database suitable for demo and trial purposes only. **These instructions are not meant for use in production environments**.{{< /caution >}} + +{{< note >}}If you already have an active NGINX Controller trial and want to add App Security to it, you can start with the [Install NGINX App Protect with NGINX Plus](#install-nginx-app-protect-with-nginx-plus) section. {{< /note >}} + +  + +--- + +## Technical Requirements + +Be sure to review the [NGINX Controller Technical Specifications Guide]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md" >}}) for additional requirements for your desired distribution and configuration. + +### Supported Distributions + +NGINX Controller with App Security supports the following distributions for deploying NGINX App Protect: + +- CentOS 7 (7.4+) +- Red Hat Enterprise Linux 7 (7.4+) +- Debian 9 +- Ubuntu 18.04 LTS, Ubuntu 20.04 LTS + +### Hardware Specs + +The following minimum hardware specifications are required for each node running NGINX Controller: + +- RAM: 8 GB RAM +- CPU: 8-Core CPU @ 2.40 GHz or similar +- Disk space: 155–255 GB free disk space. 255 GB of free space is recommended if NGINX Controller App Security is enabled. See the [Storage Requirements]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md#storage-requirements" >}}) section for a categorized list of the storage requirements. 
+ +### Supported NGINX Versions + +The App Security add-on for the NGINX Controller Application Delivery module is compatible with the versions of NGINX Plus and NGINX App Protect shown in the table below. New releases of NGINX Controller ADC support the last four versions of NGINX Plus at release time. + +{{< see-also >}} +Refer to [Using NGINX App Protect with NGINX Controller]({{< relref "controller/admin-guides/install/install-for-controller.md" >}}) for installation instructions and additional information. +{{< /see-also >}} + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| NGINX Controller version | NGINX App Protect version(s) | NGINX Plus version(s) | +|-------------------------------------|-------------------------------------------------------------------------------------------------|--------------------------------| +| NGINX Controller ADC v3.22.9 | v4.5
v4.3, v4.4
v4.0, v4.1, v4.2
v3.12, v3.11 | R30
R29
R28
R27 | +| NGINX Controller ADC v3.22.8 | v4.0, v4.1
v3.12, v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6 | R28
R27
R26
R25 | +| NGINX Controller ADC v3.22.7 | v4.0, v4.1
v3.12, v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6 | R28
R27
R26
R25 | +| NGINX Controller ADC v3.22.6 | v4.0, v4.1
v3.12, v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6 | R28
R27
R26
R25 | +| NGINX Controller ADC v3.22.5 | v3.12, v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2 | R27
R26
R25
R24 | +| NGINX Controller ADC v3.22.4 | v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2 | R27
R26
R25
R24 | +| NGINX Controller ADC v3.22.3 | v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3 | R26
R25
R24
R23 | +| NGINX Controller ADC v3.22.2 | v3.9.1, v3.9.0
v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3 | R26
R25
R24
R23 | +| NGINX Controller ADC v3.22, v3.22.1 | v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R25
R24
R23
R22 | +| NGINX Controller ADC v3.21 | v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R25
R24
R23
R22 | +| NGINX Controller ADC v3.20.1 | v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R25
R24
R23
R22 | +| NGINX Controller ADC v3.20 | v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R24
R23
R22 | +| NGINX Controller APIM v3.19.2 | v3.6
v3.5, v3.4 | R25
R24 | +| NGINX Controller APIM v3.19 | v3.5, v3.4 | R24 | +| NGINX Controller v3.18 | v3.5, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R24
R23
R22 | +| NGINX Controller v3.17 | v3.2
v3.1, v3.0, v2.3
v2.1.1 | R24
R23
R22 | +| NGINX Controller v3.16 | v3.1, v3.0, v2.3
v2.1.1 | R23
R22 | +| NGINX Controller v3.14, v3.15 | v3.0, v2.3
v2.1.1 | R23
R22 | +| NGINX Controller v3.13 | v2.3
v2.1.1 | R23
R22 | +| NGINX Controller v3.12 | v2.1.1 | R22 | + +{{< /bootstrap-table >}} + +  + +--- + +## Sign Up for a Trial License + +{{< note >}}If you already have an active NGINX Controller trial instance that you want to add App Security to, you can skip this section.{{< /note >}} + +First, you need to sign up for a trial license for NGINX Controller. The trial includes access to NGINX Plus, the NGINX Controller Application Delivery module, and the Application Security add-on. + +1. Go to [MyF5](https://account.f5.com/myf5) and create a new account. +1. Verify your account and log in to MyF5. +1. On the MyF5 landing page, activate the NGINX Controller free trial. +1. On the MyF5 **Trials** page, select Launch Your Trial. +1. Download the NGINX Controller package. +1. Make note of your Association Token. You will use this to [license your NGINX Controller instance]({{< relref "/controller/platform/licensing-controller.md#add-a-license-to-nginx-controller" >}}). + + +  + +--- + +## Install NGINX Controller Prerequisites + +{{< note >}}If you already have an active NGINX Controller trial instance that you want to add App Security to, you can skip this section.{{< /note >}} + +{{< include "controller/helper-script-prereqs.md" >}} + +  + +--- + +## Install NGINX Controller + +{{< note >}}If you already have an active NGINX Controller trial instance that you want to add App Security to, you can skip this section.{{< /note >}} + +Install NGINX Controller on a dedicated node that **does not** already have Kubernetes configured. NGINX Controller does not support pre-configured Kubernetes implementations at this time. The installer for NGINX Controller will install and configure Kubernetes for you. + +{{< important >}}Before installing NGINX Controller, you must **disable swap on the host**; this is required by Kubernetes in order for the kubelet to work properly. Refer to your Linux distribution documentation for specific instructions for disabling swap for your system. 
For more information about this requirement, see the AskF5 knowledge base article [K82655201](https://support.f5.com/csp/article/K82655201) and the [kubeadm installation guide](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) in the Kubernetes documentation.{{< /important >}} + +{{< caution >}}**For RHEL 8 deployments**, complete the additional prerequisite steps in the [Installing NGINX on RHEL 8]({{< relref "/controller/admin-guides/install/install-nginx-controller-rhel-8.md" >}}) guide before installing NGINX Controller. RHEL 8 support is a **beta** feature.{{< /caution >}} + +To install NGINX Controller, take the following steps: + +1. Download the NGINX Controller installer package from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). +1. Extract the installer package files: + + ```bash + tar xzf controller-installer-.tar.gz + ``` + +1. Run the installation script: + + ```bash + cd controller-installer + ./install.sh + ``` + +1. When prompted to use an embedded config DB, type `y`. + +1. The installation script walks through a series of steps and asks for the following inputs: + + - **Config database volume type**: Specify the type of volume to use to store the config database: local, NFS, or AWS. We recommend choosing `local` for demo and trial purposes. + + {{< see-also >}}Refer to the [NGINX Controller Technical Specifications Guide]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md#local-or-external-storage" >}}) for more information about the volume options and requirements.{{< /see-also >}} + + - **Analytics database volume type**: Specify the type of volume to use to store the analytics database: local, NFS, or AWS. We recommend choosing `local` for demo and trial purposes. + - **EULA**: Read the end-user license agreement. Type either `y` to accept or `n` to exit. + - **SMTP** + - **SMTP Host**: Provide the host name or IP address of an SMTP server. 
This is used to send password recovery emails. For trial purposes, if you don't need to receive these communications, you can enter a value of "example.com" or something similar. + - **SMTP Port**: The port of the SMTP server. + - **SMTP Authentication**: Select `y` or `n` to authenticate when connecting to the SMTP server. + - **Use TLS for SMTP Communication**: Select `y` or `n` to use SSL for SMTP server connections. + - **Do not reply email address**: The sender's email address. For example, `donotreply@example.com`. + - **Admin** + - **First name**: The first name for the initial admin user. + - **Last name**: The last name for the initial admin user. + - **Email address**: The contact email address for the initial admin user. + - **Password**: The initial admin's password. Passwords must be 6-64 characters long and must include letters and digits. + - **FQDN**: Fully qualified domain name (FQDN) -- a resolvable domain name for the NGINX Controller server. You can use the FQDN to access the NGINX Controller web interface. + Additionally, the FQDN is used by Controller Agents when connecting to NGINX Controller. + - **SSL/TLS certificates**: Type `y` to generate and use self-signed certs for running NGINX Controller over HTTPS, or type `n` to provide your own certs. + + {{< important >}} +If you provide your own SSL/TLS certificates, you'll need a complete certificate chain file, with the intermediate CA cert appended to the server cert; the server certificate must appear **before** the chained certificates in the combined file. + {{< /important >}} + +1. Log in to NGINX Controller at `https:///login`. Use the admin email address and password that you provided during the installation process. + +1. Once NGINX Controller is installed, you may safely delete the installer package that you downloaded and extracted. + +  + +--- + +## License NGINX Controller + +To add a license to NGINX Controller, take the following steps: + +1. 
Go to `https:///platform/license` and log in. +1. In the **Upload a license** section, select an upload option: + + - **Upload license file** -- Locate and select your license file in the file explorer. + - **Paste your Association Token or license file** -- Paste your customer Association Token or the contents of your NGINX Controller license file. These are available on the [MyF5 Customer Portal](https://account.f5.com/myf5). + +1. Select **Save license**. + +{{< see-also >}} +To add a license using the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}), send a PUT request to the `/platform/license` endpoint. Provide your CAT or NGINX Controller license as a base64-encoded string in the JSON request body. +{{< /see-also >}} + + +  + +--- + +## Install NGINX App Protect with NGINX Plus + +[NGINX App Protect](https://www.nginx.com/products/nginx-app-protect/) is the security data plane for NGINX Controller App Security. Your NGINX App Protect installation will include NGINX Plus. + +{{< important >}} +If you are adding App Security to an existing NGINX Controller trial, we recommend that you take the steps in this section to deploy a new NGINX App Protect instance, rather than adding the App Protect module to an existing NGINX Plus instance. + +NGINX Controller App Security is supported for use with a limited subset of the OS distributions that are supported by the NGINX Controller Agent and NGINX Plus. If you are planning to add NGINX App Protect to an existing NGINX Plus instance, be sure to check the [Supported Distributions](#supported-distributions) section above to verify that your NGINX Plus instance supports NGINX App Protect. +{{< /important >}} + +### Prerequisites + +- Be sure to review the [NGINX Plus Technical Specifications](https://docs.nginx.com/nginx/technical-specs/) for the requirements for your distribution and desired configuration. 
+- You'll need the NGINX Plus certificate and public key files (`nginx-repo.crt` and `nginx-repo.key`) when installing NGINX App Protect. If you don't have these files, you can use the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}) to download them. + +#### Download the NGINX App Protect Cert and Key + +Take the steps below to download the cert and key files by using the NGINX Controller REST API. + +The NGINX Controller API uses session cookies to authenticate requests. The session cookie is returned in response to a `GET /api/v1/platform/login` request. See the Login endpoint in the [NGINX Controller API Reference]({{< relref "/controller/api/_index.md" >}}) documentation for information about session cookie timeouts and invalidation. + +{{< tip >}} +You can send a GET request to the login endpoint to find the status of the session token. +{{< /tip >}} + +For example: + +- Login and capture the session cookie: + + ```curl + curl -c cookie.txt -X POST --url 'https:///api/v1/platform/login' --header 'Content-Type: application/json' --data '{"credentials": {"type": "BASIC","username": "","password": ""}}' + ``` + +- Use the session cookie to authenticate and get the session status: + + ```curl + curl -b cookie.txt -c cookie.txt -X GET --url 'https:///api/v1/platform/login' + ``` + + +
+ +To use the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}) to download your NGINX Plus certificate and key bundle as a gzip or JSON file, send a GET request to the `/platform/licenses/nginx-plus-licenses/controller-provided` endpoint. + +For example: + +- Download JSON file: + + ```bash + curl -b cookie.txt -c cookie.txt --header 'Content-Type: application/json' -X GET --url 'https://192.0.2.0/api/v1/platform/licenses/nginx-plus-licenses/controller-provided' --output nginx-plus-certs.json + ``` + +- Download GZIP file: + + ```bash + curl -b cookie.txt -c cookie.txt -X GET --url 'https://192.0.2.0/api/v1/platform/licenses/nginx-plus-licenses/controller-provided' --output nginx-plus-certs.gz + ``` + +{{< note >}} +If you are using a self-signed certificate you will need to add `-k` (allow insecure connections) to your curl command to be able to download your NGINX Plus certificate and key bundle. +{{< /note >}} + + +Once you have downloaded your certificate and key bundle you will need to expand the `.gz` file to get your certificate and key pair. + +For example: + +```bash +gunzip nginx-plus-certs.gz +``` + +### Deploy NGINX App Protect + +
+ +Install NGINX App Protect on a host accessible by your NGINX Controller instance by following the appropriate steps for your operating system in the [Using NGINX App Protect with NGINX Controller]({{< relref "controller/admin-guides/install/install-for-controller.md" >}}) guide. + +{{< note >}} +If you install NGINX App Protect by using any of the OS-specific install guides, **do not make changes to the `nginx.conf` file**. +The NGINX Controller Agent manages `nginx.conf` settings and will make the appropriate adjustments for you. +{{< /note >}} + +
+ +  + +--- + +## Add the NGINX App Protect Instance to NGINX Controller + +{{< include "controller/add-existing-instance.md" >}} + +  + +--- + +## What's Next + +You should now be ready to start your NGINX Controller with App Security trial. Refer to the following topics to get started: + +- [Configure the NGINX Controller Agent]({{< relref "/controller/admin-guides/config-agent/configure-the-agent.md" >}}) +- [Set Up Metrics Collection]({{< relref "/controller/admin-guides/config-agent/configure-metrics-collection.md" >}}) +- [Forward Metrics Data to an External Service]({{< relref "/controller/analytics/forwarders/_index.md" >}}) +- [Set up NGINX Controller Services]({{< relref "/controller/services/overview.md" >}}) + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/try-nginx-controller.md b/content/controller/admin-guides/install/try-nginx-controller.md new file mode 100644 index 000000000..03d15b816 --- /dev/null +++ b/content/controller/admin-guides/install/try-nginx-controller.md @@ -0,0 +1,281 @@ +--- +description: This quick-start tutorial shows you how to get started using F5 NGINX Controller + with NGINX Plus. +docs: DOCS-260 +doctypes: +- tutorial +tags: +- docs +title: Trial NGINX Controller with NGINX Plus +toc: true +weight: 110 +--- + +## Overview + +This quick-start tutorial shows you how to get started using F5 NGINX Controller with NGINX Plus. + +{{< caution >}}In this tutorial, NGINX Controller will install an embedded, self-hosted PostgreSQL database suitable for demo and trial purposes only. 
**These instructions are not meant for use in production environments**.{{< /caution >}} + +{{< see-also >}}If you want to try out NGINX Controller with the Application Security add-on, refer to [Trial NGINX Controller with App Security]({{< relref "/controller/admin-guides/install/try-nginx-controller-app-sec.md" >}}).{{< /see-also >}} + +  + +--- + +## Technical Requirements + +Make sure to review the [NGINX Controller Technical Specifications Guide]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md" >}}) for the requirements for your distribution and desired configuration. + +### Supported Distributions + +NGINX Controller, the NGINX Controller Agent, and the NGINX Controller Application Security Add-on support the following distributions and architectures. + +{{< see-also >}}Refer to the [NGINX Plus Technical Specifications](https://docs.nginx.com/nginx/technical-specs/) guide for the distributions that NGINX Plus supports.{{< /see-also >}} + +{{< bootstrap-table "table table-striped table-bordered" >}} + +|Distribution
and Version|NGINX Controller
(Control Plane)|Agent
(Data Plane)|ADC App. Sec.
(Data Plane)|APIM Adv. Sec.
(Data Plane)|Notes| +|--- |--- |--- |--- |--- |--- | +|Amazon Linux
2
(x86_64)| Not supported|v3.0+ |Not supported|Not supported| | +|Amazon Linux
2017.09+
(x86_64)| Not supported |v3.0+|Not supported |Not supported| | +|CentOS
6.5+
(x86_64)| Not supported |v3.0+| Not supported |Not supported| • CentOS 6.5 and later versions in the CentOS 6 family are partially supported.
• This distribution does not support AVRD.| +|CentOS
7.4+
(x86_64)|v3.0+|v3.0+ | v3.12+ |v3.19+| • CentOS 7.4 and later versions in the CentOS 7 family are supported.| +|Debian
8
(x86_64)| Not supported |v3.0–3.21|Not supported|Not supported|• This distribution does not support AVRD.| +|Debian
9
(x86_64)|v3.0+|v3.0–3.21 | v3.12+ |v3.19+ | | +|Debian
10
(x86_64)| Not supported |v3.17+ | v3.17+ |v3.19+| See the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/) for requirements for Debian 10. | +|Red Hat Enterprise Linux
6.5+| Not supported |v3.0+| Not supported | Not supported| • RHEL 6.5 and later versions in the RHEL 6 family are partially supported.| +|Red Hat Enterprise Linux
7.4+
(x86_64)|v3.5+|v3.5+ | v3.12+|v3.19+| • RHEL 7.4 and later versions in the RHEL 7 family are supported.
• SELinux may interfere with NGINX Controller installation and operation. If you do enable SELinux, it must use permissive mode. Use of enforcing mode is not supported. | +|Red Hat Enterprise Linux
8.0+
(x86_64)|v3.22+|v3.22+ | v3.22+| Not supported | • RHEL 8.0 and later versions in the RHEL 8 family are supported.
• SELinux may interfere with NGINX Controller installation and operation. If you do enable SELinux, it must use permissive mode. Use of enforcing mode is not supported. | +|Ubuntu
18.04 LTS
(x86_64)|v3.0+|v3.0+ |v3.13+|v3.19+| | +|Ubuntu
20.04 LTS
(x86_64)|v3.20+|v3.12+|v3.16.1+|v3.19+| | + +{{< /bootstrap-table >}} + + + +#### Analytics, Visibility, and Reporting Daemon (AVRD) + +NGINX Controller v3.1 and later use an Analytics, Visibility, and Reporting daemon (AVRD) to aggregate and report app-centric metrics, which you can use to track and check the health of your apps. To learn more about these metrics, see the [NGINX Metrics Catalog]({{< relref "/controller/analytics/catalogs/metrics.md" >}}) topic. + +### Hardware Specs + +The following minimum hardware specifications are required for each node running NGINX Controller: + +- RAM: 8 GB RAM +- CPU: 8-Core CPU @ 2.40 GHz or similar +- Disk space: 155–255 GB free disk space. 255 GB of free space is recommended if NGINX Controller App Security is enabled. See the [Storage Requirements]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md#storage-requirements" >}}) section for a categorized list of the storage requirements. + +### Supported NGINX Plus Versions + +NGINX Controller supports the following [NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) versions: + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| NGINX Plus | NGINX Controller | NGINX Controller ADC | NGINX Controller APIM | +|------------|------------------|----------------------|-----------------------| +| R30 | Not supported | 3.22.9+ | Not supported | +| R29 | Not supported | 3.22.9+ | 3.19.6+ | +| R28 | Not supported | 3.22.6+ | 3.19.6+ | +| R27 | Not supported | 3.22.4+ | 3.19.6+ | +| R26 | Not supported | 3.22.2+ | 3.19.6+ | +| R25 | Not supported | 3.20.1+ | 3.19.2+ | +| R24 | 3.17+ | 3.20+ | 3.18+ | +| R23 | 3.12+ | 3.20.0 - 3.22.2 | 3.18+ | +| R22 | 3.5+ | 3.20.0 - 3.22.1 | 3.18+ | +| R21 | 3.5 - 3.12 | Not supported | Not supported | +| R20 | 3.0 - 3.12 | Not supported | Not supported | +| R19 | 2.6 - 3.5 | Not supported | Not supported | + +{{< /bootstrap-table >}} + +--- + +## Sign Up for a Trial License + +First, you need to sign 
up for a trial license for NGINX Controller. The trial includes access to NGINX Plus, the NGINX Controller Application Delivery module, and the Application Security add-on. + +1. Go to [MyF5](https://account.f5.com/myf5) and create a new account. +1. Verify your account and log in to MyF5. +1. On the MyF5 landing page, activate the NGINX Controller free trial. +1. On the MyF5 **Trials** page, select Launch Your Trial. +1. Download the NGINX Controller package. +1. Make note of your Association Token. You will use this to [license your NGINX Controller instance]({{< relref "/controller/platform/licensing-controller.md#add-a-license-to-nginx-controller" >}}). + +  + +--- + +## Install NGINX Controller Prerequisites + +{{< include "controller/helper-script-prereqs.md" >}} + +  + +--- + +## Install NGINX Controller + +Install NGINX Controller on a dedicated node that **does not** already have Kubernetes configured. NGINX Controller does not support pre-configured Kubernetes implementations at this time. The installer for NGINX Controller will install and configure Kubernetes for you. + +{{< important >}}Before installing NGINX Controller, you must **disable swap on the host**; this is required by Kubernetes in order for the kubelet to work properly. Refer to your Linux distribution documentation for specific instructions for disabling swap for your system. For more information about this requirement, see the AskF5 knowledge base article [K82655201](https://support.f5.com/csp/article/K82655201) and the [kubeadm installation guide](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) in the Kubernetes documentation.{{< /important >}} + +{{< caution >}}**For RHEL 8 deployments**, complete the additional prerequisite steps in the [Installing NGINX on RHEL 8]({{< relref "/controller/admin-guides/install/install-nginx-controller-rhel-8.md" >}}) guide before installing NGINX Controller. 
RHEL 8 support is a **beta** feature.{{< /caution >}} + +To install NGINX Controller, take the following steps: + +1. Download the NGINX Controller installer package from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). +1. Extract the installer package files: + + ```bash + tar xzf controller-installer-.tar.gz + ``` + +1. Run the installation script: + + ```bash + cd controller-installer + ./install.sh + ``` + +1. When prompted to use an embedded config DB, type `y`. + +1. The installation script walks through a series of steps and asks for the following inputs: + + - **Config database volume type**: Specify the type of volume to use to store the config database: local, NFS, or AWS. We recommend choosing `local` for demo and trial purposes. + + {{< see-also >}}Refer to the [NGINX Controller Technical Specifications Guide]({{< relref "/controller/admin-guides/install/nginx-controller-tech-specs.md#local-or-external-storage" >}}) for more information about the volume options and requirements.{{< /see-also >}} + + - **Analytics database volume type**: Specify the type of volume to use to store the analytics database: local, NFS, or AWS. We recommend choosing `local` for demo and trial purposes. + - **EULA**: Read the end-user license agreement. Type either `y` to accept or `n` to exit. + - **SMTP** + - **SMTP Host**: Provide the host name or IP address of an SMTP server. This is used to send password recovery emails. For trial purposes, if you don't need to receive these communications, you can enter a value of "example.com" or something similar. + - **SMTP Port**: The port of the SMTP server. + - **SMTP Authentication**: Select `y` or `n` to authenticate when connecting to the SMTP server. + - **Use TLS for SMTP Communication**: Select `y` or `n` to use SSL for SMTP server connections. + - **Do not reply email address**: The sender's email address. For example, `donotreply@example.com`. 
+ - **Admin** + - **First name**: The first name for the initial admin user. + - **Last name**: The last name for the initial admin user. + - **Email address**: The contact email address for the initial admin user. + - **Password**: The initial admin's password. Passwords must be 6-64 characters long and must include letters and digits. + - **FQDN**: Fully qualified domain name (FQDN) -- a resolvable domain name for the NGINX Controller server. You can use the FQDN to access the NGINX Controller web interface. + Additionally, the FQDN is used by Controller Agents when connecting to NGINX Controller. + - **SSL/TLS certificates**: Type `y` to generate and use self-signed certs for running NGINX Controller over HTTPS, or type `n` to provide your own certs. + + {{< important >}} +If you provide your own SSL/TLS certificates, you'll need a complete certificate chain file, with the intermediate CA cert appended to the server cert; the server certificate must appear **before** the chained certificates in the combined file. + {{< /important >}} + +1. Log in to NGINX Controller at `https:///login`. Use the admin email address and password that you provided during the installation process. + +1. Once NGINX Controller is installed, you may safely delete the installer package that you downloaded and extracted. + +  + +--- + +## License NGINX Controller + +To add a license to NGINX Controller, take the following steps: + +1. Go to `https:///platform/license` and log in. +1. In the **Upload a license** section, select an upload option: + + - **Upload license file** -- Locate and select your license file in the file explorer. + - **Paste your Association Token or license file** -- Paste your customer Association Token or the contents of your NGINX Controller license file. These are available on the [MyF5 Customer Portal](https://account.f5.com/myf5). + +1. Select **Save license**. 
+ +{{< see-also >}} +To add a license using the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}), send a PUT request to the `/platform/license` endpoint. Provide your CAT or NGINX Controller license as a base64-encoded string in the JSON request body. +{{< /see-also >}} + + +  + +--- + +## Install NGINX Plus + +### Prerequisites + +- Make sure to review the [NGINX Plus Technical Specifications Guide](https://docs.nginx.com/nginx/technical-specs/) for the requirements for your distribution and desired configuration. +- You'll need the NGINX Plus certificate and public key files (`nginx-repo.crt` and `nginx-repo.key`) that were provided when you signed up for the trial license. If you don't have these files, you can use the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}) to download them. + +#### How to Download the NGINX Plus Cert and Key using the NGINX Controller API + +The NGINX Controller API uses session cookies to authenticate requests. The session cookie is returned in response to a `GET /api/v1/platform/login` request. See the Login endpoint in the [NGINX Controller API Reference]({{< relref "/controller/api/_index.md" >}}) documentation for information about session cookie timeouts and invalidation. + +{{< tip >}} +You can send a GET request to the login endpoint to find the status of the session token. +{{< /tip >}} + +For example: + +- Login and capture the session cookie: + + ```curl + curl -c cookie.txt -X POST --url 'https://198.51.100.10/api/v1/platform/login' --header 'Content-Type: application/json' --data '{"credentials": {"type": "BASIC","username": "arthur@arthurdent.net","password": "Towel$123"}}' + ``` + +- Use the session cookie to authenticate and get the session status: + + ```curl + curl -b cookie.txt -c cookie.txt -X GET --url 'https://198.51.100.10/api/v1/platform/login' + ``` + + +
+ +To use the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}) to download your NGINX Plus certificate and key bundle as a gzip or JSON file, send a GET request to the `/platform/licenses/nginx-plus-licenses/controller-provided` endpoint. + +For example: + +- Download JSON file: + + ```bash + curl -b cookie.txt -c cookie.txt --header 'Content-Type: application/json' -X GET --url 'https://192.0.2.0/api/v1/platform/licenses/nginx-plus-licenses/controller-provided' --output nginx-plus-certs.json + ``` + +- Download GZIP file: + + ```bash + curl -b cookie.txt -c cookie.txt -X GET --url 'https://192.0.2.0/api/v1/platform/licenses/nginx-plus-licenses/controller-provided' --output nginx-plus-certs.gz + ``` + +{{< note >}} +If you are using a self-signed certificate you will need to add `-k` (allow insecure connections) to your curl command to be able to download your NGINX Plus certificate and key bundle. +{{< /note >}} + + +Once you have downloaded your certificate and key bundle you will need to expand the `.gz` file to get your certificate and key pair. + +For example: + +```bash +gunzip nginx-plus-certs.gz +``` + +### Steps + +Take the following steps to install NGINX Plus: + +{{< important >}} +You need the NGINX Plus certificate and public key files (`nginx-repo.crt` and `nginx-repo.key`) that were provided when you signed up for the trial license. +{{< /important >}} + +1. First, make sure to review the [NGINX Plus Technical Specifications Guide](https://docs.nginx.com/nginx/technical-specs/) for the requirements for your distribution and desired configuration. +2. To install NGINX Plus, follow the instructions in the [NGINX Plus Installation Guide](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-plus/). Refer to the relevant section for your distribution. 
+ +  + +--- + +## Add an NGINX Plus Instance to NGINX Controller + +{{< include "controller/add-existing-instance.md" >}} + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/using-helper-script.md b/content/controller/admin-guides/install/using-helper-script.md new file mode 100644 index 000000000..49828b201 --- /dev/null +++ b/content/controller/admin-guides/install/using-helper-script.md @@ -0,0 +1,450 @@ +--- +description: Learn how to update F5 NGINX Controller installation settings and manage + the NGINX Controller service using the helper.sh script. +docs: DOCS-261 +doctypes: +- task +tags: +- docs +title: Update NGINX Controller Settings with helper.sh +toc: true +weight: 200 +--- + +## Overview + +You can use the F5 NGINX Controller `helper.sh` script to update NGINX Controller installation settings and manage the NGINX Controller process. This tutorial shows you how to use `helper.sh` to perform the following tasks: + +- Install the NGINX Controller prerequisites +- View the version of NGINX Controller that's installed and running +- Start, stop, and restart NGINX Controller +- Back up and restore the NGINX Controller config and encryption keys +- Restore the embedded config database +- Get the NGINX Plus repository key and certificate files (deprecated for `helper.sh` in NGINX Controller v3.9) +- Update the SMTP settings +- Update the database settings +- Update or replace the TLS certificates +- Print the NGINX Controller logs +- Create a support package + +## Install NGINX Controller Prerequisites + + + +{{< include "controller/helper-script-prereqs.md" >}} + + + +  + +--- + +## View the Installed NGINX Version + +To see which version of NGINX Controller is installed and running, type the following command: + +``` bash +/opt/nginx-controller/helper.sh version +``` + +The output looks similar to the following: + +``` 
bash +Installed version: 3.14.0 +Running version: 3.14.0 +``` + +  + +--- + +## Start, Stop, and Restart NGINX Controller + + +You can use the `helper.sh` script to start, stop, restart, and check the status of the NGINX Controller process. + +``` bash +/opt/nginx-controller/helper.sh controller start +/opt/nginx-controller/helper.sh controller stop +/opt/nginx-controller/helper.sh controller restart +/opt/nginx-controller/helper.sh controller status +``` + +  + +--- + +## Back Up and Restore Config and Encryption Keys + + + +After installing NGINX Controller, you should back up the cluster config and encryption keys. You'll need these if you ever need to restore the NGINX config database on top of a new NGINX Controller installation. + +- To back up the NGINX Controller cluster configuration and encryption keys: + + ```bash + /opt/nginx-controller/helper.sh cluster-config save + ``` + + The file is saved to `/opt/nginx-controller/cluster-config.tgz`. + +- To restore the cluster's config and encryption keys, take the following steps: + + ```bash + /opt/nginx-controller/helper.sh cluster-config load + ``` + + + +  + +--- + +## Restore Embedded Config Database + + + +This section explains how to restore the embedded config database from the latest backup file or a specific, timestamped file. + +{{< important >}}If you restore the config database on top of a new installation of NGINX Controller, make sure to follow the steps to [restore your NGINX config and encryption keys]({{< relref "/controller/admin-guides/backup-restore/backup-restore-cluster-config.md" >}}) afterward. 
{{< /important >}}
+
+- To restore the embedded NGINX Controller config database **from the latest automated backup**, run the following command:
+
+  ```bash
+  /opt/nginx-controller/helper.sh backup restore
+  ```
+
+- To restore the embedded config database from **a specific backup file**:
+
+  ```bash
+  /opt/nginx-controller/helper.sh backup restore <backup-file-name>
+  ```
+
+  - If you installed the embedded config database on a **local volume**, the backup files are located in `/opt/nginx-controller/postgres_data/`.
+
+  - If you installed the embedded config database on an **NFS volume**, follow the steps in [(NFS) Copy Config Database Backup to Local Volume for Restoration]({{< relref "/controller/admin-guides/backup-restore/backup-restore-embedded-config-db.md#nfs-copy-config-database-backup-to-local-volume-for-restoration" >}}) to download the backup file to your local volume, and then use the `helper.sh` script to restore from it.
+
+&nbsp;
+
+
+---
+
+## Get NGINX Plus Repository Key and Certificate
+
+To install NGINX Plus as a data plane for NGINX Controller, you need to have the NGINX repository key and certificate files.
+
+{{< deprecated >}}Using the helper.sh script to download your NGINX Plus certificate and key bundle is deprecated in NGINX Controller v3.9.{{< /deprecated >}}
+
+{{< see-also >}}If you're running NGINX Controller v3.10+, you can use the REST API to [Download the NGINX Plus Cert and Key Bundle]({{< relref "/controller/admin-guides/install/get-n-plus-cert-and-key.md" >}}). {{< /see-also >}} &nbsp;
+
+If you're running NGINX Controller 3.9 or earlier, use the `helper.sh` script to extract the NGINX repository key and certificate files:
+
+```bash
+/opt/nginx-controller/helper.sh repository-cred [-c|--cert <file-name>] [-k|--key <file-name>]
+```
+
+{{< important >}}
+
+Make sure that you've [uploaded your license in NGINX Controller]({{< relref "licensing-controller.md" >}}) before running the `helper.sh repository-cred` command to extract the repository files. 
+
+{{< /important >}}
+
+
+
+| Options | Description |
+|----------|-------------|
+| `-c` \| `--cert` | Creates a certificate called `<file-name>`. The default file name is `nginx-repo.crt` in the current directory.|
+| `-k` \| `--key` | Creates a key called `<file-name>`. The default file name is `nginx-repo.key` in the current directory. |
+
+&nbsp;
+
+---
+
+## Update SMTP Settings
+
+Use the `helper.sh` script to change the SMTP address; port; TLS; sender; and optionally, the username and password.
+
+``` bash
+/opt/nginx-controller/helper.sh configsmtp <address> <port> <tls> <from> [auth] [username] [password]
+```
+
+For example:
+
+``` bash
+/opt/nginx-controller/helper.sh configsmtp 192.0.2.0 25 false noreply@example.com true user1 password1
+```
+
+
+
+| Options | Description |
+|----------|-------------|
+| `address` | The host name or IP address of the SMTP server. |
+| `port` | The port of the SMTP server. |
+| `tls` | `true` or `false`. Set to `true` to require SSL for connections to the SMTP server. |
+| `from` | Sender's email address. |
+| `auth` | `true` or `false`. Set to `true` to authenticate when connecting to the SMTP server. |
+| `username` | The username to use for access to the SMTP server. |
+| `password` | The password to use for access to the SMTP server. |
+
+&nbsp;
+
+### Environment Variables
+
+We strongly recommend that you use environment variables, especially for passwords, to prevent exposing sensitive information in system processes (for example, `ps`, `top`) and the bash history.
+
+You use these SMTP environment variables with NGINX Controller:
+
+| Environment Variables | Description |
+|----------|-------------|
+| `CTR_SMTP_HOST` | The host name or IP address of the SMTP server. |
+| `CTR_SMTP_PORT` | The port of the SMTP server.|
+| `CTR_SMTP_TLS` | `true` or `false`; Set to `true` to require SSL for connections to the SMTP server. |
+| `CTR_SMTP_FROM` | Sender's email address. |
+| `CTR_SMTP_AUTH` | `true` or `false`; Set to `true` to authenticate when connecting to the SMTP server. |
+| `CTR_SMTP_USER` | The username to use for access to the SMTP server. |
+| `CTR_SMTP_PASS` | The password to use for access to the SMTP server. 
 |
+
+For example:
+
+``` bash
+CTR_SMTP_HOST=192.0.2.0 \
+CTR_SMTP_PORT=25 \
+CTR_SMTP_TLS=false \
+CTR_SMTP_FROM=noreply@nginx.test \
+CTR_SMTP_AUTH=true CTR_SMTP_USER=user1 CTR_SMTP_PASS=password1 \
+/opt/nginx-controller/helper.sh configsmtp
+```
+
+&nbsp;
+
+---
+
+## Update Database Settings
+
+Use the `helper.sh` script to change the external config database address; port; and optionally, the username, password, and certificate authentication. However, if your current installation uses an internal config database, then these settings are read-only and cannot be modified using the `helper.sh` script (password and certificates will be automatically rotated with each Controller update).
+
+``` bash
+/opt/nginx-controller/helper.sh configdb <address> <port> [username] [password] [ssl] [ca] [cert] [key]
+```
+
+For example:
+
+``` bash
+/opt/nginx-controller/helper.sh configdb 192.0.2.1 5432 user1 password1 false
+```
+
+
+
+| Options | Description |
+|----------|-------------|
+| `address` | The host name or IP address of the config database. |
+| `port` | The port of the database. |
+| `username` | The username to use for access to the config database. |
+| `password` | The password to use for access to the config database. |
+| `ssl` | `true` or `false`. Set to 'true' to require SSL for connections to the config database. |
+| `ca` | CA certificate file path. |
+| `cert` | Certificate file path. |
+| `key` | Key file path. |
+
+&nbsp;
+
+### Environment Variables
+
+We strongly recommend that you use environment variables, especially for passwords, to prevent exposing sensitive information in system processes (for example, `ps`, `top`) and the bash history.
+
+You can use these database environment variables with NGINX Controller:
+
+| Environment Variables | Description |
+|----------|-------------|
+| `CTR_DB_HOST` | The host name or IP address of the config database. |
+| `CTR_DB_PORT` | The port of the config database used for incoming connections. |
+| `CTR_DB_USER` | The username for the account to use for access to the config database; must be provided with password. |
+| `CTR_DB_PASS` | The password for the account to use for access to the config database; must be provided with username. |
+| `CTR_DB_ENABLE_SSL` | `true` or `false`; Set to `true` to require SSL for connections to the config database. |
+| `CTR_DB_CA` | CA certificate file path. |
+| `CTR_DB_CLIENT_CERT` | Certificate file path. |
+| `CTR_DB_CLIENT_KEY` | Key file path. 
 |
+
+For example:
+
+```bash
+CTR_DB_HOST=192.0.2.1 \
+CTR_DB_PORT=5432 \
+CTR_DB_USER=user1 \
+CTR_DB_PASS=password1 \
+CTR_DB_ENABLE_SSL=false \
+/opt/nginx-controller/helper.sh configdb
+```
+
+&nbsp;
+
+---
+
+## Update or Replace TLS Certificates
+
+Use the `helper.sh` script to update or replace the TLS certificates that are used to connect to NGINX Controller.
+
+``` bash
+/opt/nginx-controller/helper.sh configtls <cert_file> <key_file>
+```
+
+
+
+| Options | Description |
+|----------|-------------|
+| `cert_file` | Certificate file path. |
+| `key_file` | Key file path. |
+
+&nbsp;
+
+---
+
+## Print NGINX Controller Logs
+
+To print the NGINX Controller logs, enter the following command:
+
+``` bash
+/opt/nginx-controller/helper.sh logs
+```
+
+&nbsp;
+
+---
+
+## Add a Custom Logo
+
+You can replace the NGINX Controller logo in the user interface with a custom logo. The requirements are:
+
+- The logo file is in SVG format.
+- The logo is square in shape.
+
+{{< note >}} The following steps modify the logo in the top left corner and in the menu, not the favicon. {{< /note >}}
+
+Follow the steps below to replace the logo:
+
+1. Connect to the NGINX Controller host using 'ssh'.
+1. Transfer the logo file to NGINX Controller using one of the following methods:
+   1. Method 1: Download the file using curl after connecting to the host using the command `curl https://example.com/custom-logo.svg`.
+   1. Method 2: Upload the logo to the host using SCP: `scp /local/path/custom-logo.svg user@controller-host:/remote/path`.
+   1. Method 3: Copy/Paste the logo file.
+      1. Copy the logo file to the clipboard before connecting to the host.
+      1. After connecting to the host, paste the file.
+1. Run `helper.sh setlogo <file-name>` (`<file-name>` is the name of the SVG file).
+1. Wait for approximately five minutes for the cache to clear and the logo to appear in the user interface.
+1. Re-run the `setlogo` command on each NGINX Controller node. This has to be done after an upgrade or reinstallation. 
+ +  + +--- + +## Create a Support Package + +You can create a support package for NGINX Controller that you can use to diagnose issues. + +{{< note >}} +You will need to provide a support package if you open a ticket with NGINX Support via the [MyF5 Customer Portal](https://account.f5.com/myf5). +{{< /note >}}  + +```bash +/opt/nginx-controller/helper.sh supportpkg [-o|--output ] [-s|--skip-db-dump] [-t|--timeseries-dump ] +``` + + + +| Options | Description | +|----------|-------------| +| `-o` \| `--output` | Save the support package file to ``. | +| `-s` \| `--skip-db-dump` | Don't include the database dump in the support package. | +| `-t` \| `--timeseries-dump ` | Include the last `` of timeseries data in the support package (default 12 hours). | + +Take the following steps to create a support package: + +1. Open a secure shell (SSH) connection to the NGINX Controller host and log in as an administrator. + +1. Run the `helper.sh` utility with the `supportpkg` option: + + ```bash + /opt/nginx-controller/helper.sh supportpkg + ``` + + The support package is saved to: + + `/var/tmp/supportpkg-.tar.gz` + + For example: + + `/var/tmp/supportpkg-20200127T063000PST.tar.gz` + +1. Run the following command on the machine where you want to download the support package to: + + ``` bash + scp @:/var/tmp/supportpkg-.tar.gz /local/path + ``` + +### Support Package Details + +{{< include "controller/helper-script-support-package-details.md" >}} + + + +  + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/_index.md b/content/controller/analytics/_index.md new file mode 100644 index 000000000..7a62df501 --- /dev/null +++ b/content/controller/analytics/_index.md @@ -0,0 +1,10 @@ +--- +description: Learn about the F5 NGINX Controller Analytics module. 
+menu: + docs: + parent: NGINX Controller + title: Analytics +title: Analytics +weight: 120 +url: /nginx-controller/analytics/ +--- diff --git a/content/controller/analytics/alerts/_index.md b/content/controller/analytics/alerts/_index.md new file mode 100644 index 000000000..4bae6e7ab --- /dev/null +++ b/content/controller/analytics/alerts/_index.md @@ -0,0 +1,10 @@ +--- +description: Learn about F5 NGINX Controller alerts and notifications. +menu: + docs: + parent: Analytics + weight: 10 +title: Alerts +weight: 100 +url: /nginx-controller/analytics/alerts/ +--- diff --git a/content/controller/analytics/alerts/about-alerts.md b/content/controller/analytics/alerts/about-alerts.md new file mode 100644 index 000000000..9277494ed --- /dev/null +++ b/content/controller/analytics/alerts/about-alerts.md @@ -0,0 +1,226 @@ +--- +description: Learn about NGINX Controller Alerts and Notifications. +docs: DOCS-520 +doctypes: +- concept +tags: +- docs +title: About Alerts +toc: true +weight: 100 +--- + +## Overview + +The F5 NGINX Controller Analytics module lets you configure alerts and notifications, so you can stay informed about your system and app performance. In this topic, you'll learn about [alerts](#alerts), [alert rules](#alert-rules), and [alert notifications](#alert-notifications). + +{{< see-also >}} +Refer to [Manage Alerts]({{< relref "/controller/analytics/alerts/manage-alerts.md" >}}) to learn how to set up alerts. +{{< /see-also >}} + +## Alerts + +An *alert* is generated when the criteria for an alert rule are met. 
+All alerts contain the following information: + + + +| Name | Description | +|---|---| +| `started_timestamp` | The time at which the alert was triggered.| +| `last_checked_timestamp` | The time at which the last alert check occurred.| +| `started_value` | The value of the alert metric at the time the alert was triggered.| +| `last_checked_value` | The value of the alert metric when it was last checked.| +| `dimensions` | The list of dimension values for which the alert was triggered.| + +## Alert Rules + +An *Alert Rule* defines the conditions that will trigger an alert. NGINX Controller generates names for alert rules automatically. An alert rule consists of the following information: + + + +| Name | Description | +|---|---| +| `name` | A unique identifier for the alert rule.| +| `display name` | A human-friendly name that helps you identify what the alert rule does. | +| `metric` | The [metric]({{< relref "/controller/analytics/catalogs/metrics.md" >}}) that you want to monitor.
{{< note >}}An alert rule can monitor one metric.{{< /note >}}| +| `operator` | The operator that will be applied to the value of the metric to check if an alert should be triggered. There are two available operators: `le` - less or equal and `ge` - greater or equal.| +| `threshold` | Defines the value that, when exceeded, will trigger an alert.
{{< tip >}}You can find the allowed threshold value(s) for each metric in the **unit** field of the metric's entry in the [Metrics Catalogs Reference]({{< relref "/controller/analytics/catalogs/metrics.md" >}}). Select the "Index" button to access the list of all available metrics and jump directly to that item in the catalog.{{< /tip >}} | +| `period` | Defines the time window in which you want to calculate the aggregated metric value.
- The maximum possible time window is `24h`.
- The minimum possible time window is `2m`.| +| `filter` | Lets you refine the alert rule for a more specific set of metric values, based on dimensions.
If no filter is provided, all collected data will be used when calculating the alert rule status.| +| `group by` | Groups results according to the specified dimension(s). A separate alert will be triggered for each result group. You can provide multiple dimension names as a comma-separated list.
{{}}Using a dimension with a high cardinality of values might result in a high volume of alerts.{{}}| +| `notification type` | Defines how you want to receive alert notifications. | +| `email addresses` | A comma-separated list of email addresses that should receive alert notifications.| +| `mute` | Boolean; turns alert notifications on and off. Set to 'on' to mute notifications. | + +If you leave any rule parameter blank, NGINX Controller will take all relevant data for the parameter into account in the alert rule calculation. + +Each Alert Rule has a status that describes the current state of the alert rule. It contains the following information: + + + + + +| Name | Description | +|---|---| +| `alerts count` | The total number of triggered alerts for the Alert Rule since its creation.| +| `status: ok` | The rule has not triggered any alerts, or that all triggered alerts have expired.| +| `status: ongoing` | At least one alert for the alert rule is currently ongoing.| +| `lastCheckedTimestamp` | The time when the alert rule was last checked successfully.| +| `lastStartedTimestamp` | The time when alert rule status has changed from 'ok' to 'ongoing'.| +| `lastExpiredTimestamp` | The time when alert rule status has changed from 'ongoing' to 'ok'.| + +
+ +Alert rules work in the following manner: + +1. Incoming metric updates are continuously monitored against the set of alert rules. +2. The most recent metric value is checked against the threshold defined in the alert rule. +3. If the threshold is met, an alert notification is generated and the rule will continue to be monitored. In the [Alerts Status]({{< relref "/controller/analytics/alerts/manage-alerts.md#view-alert-rule-status" >}}) pane, the alert instance's status will be displayed as "ongoing". +4. If subsequent metric updates show that the metric no longer violates the threshold for the configured period, the alert expires. + +## Alert Notifications + +An *Alert notification* is a message either displayed in the NGINX Controller user interface or sent via email. Alert notifications are sent when an alert is triggered or expired, depending on the alert rule criteria. + +- The **Notifications** feed contains information about all changes in the system, including alert changes. To access the Notifications feed, select the bell icon next to the **Account Settings** menu. +- A notification appears in the Notifications feed immediately when an alert is triggered or expires. +- Alert instance emails notify you when a single alert instance starts or expires. + +If you want to stop receiving notifications for an alert rule, but you don't want to delete it, you can [mute the alert rule]({{< relref "/controller/analytics/alerts/manage-alerts.md#mute-or-unmute-an-alert-rule" >}}). +Likewise, if you want to stop receiving emails for an alert rule, but you do want to continue receiving the user interface notifications, [edit the alert rule]({{< relref "/controller/analytics/alerts/manage-alerts.md#edit-an-alert-rule" >}}) and remove your email address. 
+ +{{< note >}}If you mute an alert rule while the alert rule status is "ongoing", you will not receive any further alert notifications, including when the alert rule status changes.{{< /note >}} + +### Email notifications + +{{< important >}} +You must [verify your email address]({{< relref "/controller/analytics/alerts/manage-registered-emails.md" >}}) in order to receive alert notification emails. +{{< /important >}} + +When an alert rule's conditions are met, NGINX Controller sends an alert email with the subject "[controller-alert] Alert started: " to all of the email addresses that are specified in the alert rule. + +If multiple alerts are triggered in a single calculation period, NGINX Controller sends a summary email message that contains all of the alerts for the time period. + +When an alert instance expires, NGINX Controller sends a message with subject "[controller-alert] Alert expired: " to all of the email addresses that are specified in the alert rule. + +The notification uses the automatically-generated name that was assigned by the system when the rule was created. + +NGINX Controller sends summary emails once every hour. These emails contain alerts that have been triggered or expired since the last summary email was sent. If no alerts started or expired in that timeframe, then the summary will not be sent. + +### How Many Notifications to Expect + +As an example, let's say that you have three instances configured in the NGINX Controller. You want to monitor all three instances based on the `http.request.count` metric. + +Assuming that traffic is constantly flowing through all three instances, and the threshold is exceeded for all three, the system will return three alerts (one per instance). In this case, you would receive one email, containing three alert notices, and three user interface notifications. + +If the threshold is exceeded for one instance, then you will receive one alert email and one notification in the user interface. 
+ +## How Alerts Work + +NGINX Controller checks the list of configured alert rules every 30 seconds. Then, it queries the [Metrics API]({{< relref "/controller/analytics/metrics/metrics-api.md" >}}) for the data defined in each alert rule. + +The API query uses the following template: + +`?names=()&startTime=now-&endTime=now<&additional-alert-rule-parameters>"` + +where + +- `` is the appropriate [aggregation function]({{< relref "/controller/analytics/metrics/metrics-api.md#aggregations" >}}) for the metric. You can find this information in the [Metrics Catalog Reference]({{< relref "/controller/analytics/catalogs/metrics.md" >}}). + - `AVG` applies to `gauge` metrics. Gauges are averaged over the time period configured in the alert rule. + - `MAX` applies to `counter` metrics. + - `SUM` applies to `incremental` metrics. + +- The `` and `` parameters are read from the alert rule configuration. +- `<&additional-alert-rule-parameters>` e.g. `filter` or `groupBy` parameters read from the alert rule configuration. + +NGINX Controller checks the value returned by the Metrics API against the configured threshold, then takes the appropriate action: + + + +| Conditions | Action | +|---|---| +| - threshold is exceeded
- "ongoing" alert does not exist | Triggers new alert. | +| - threshold is exceeded
- "ongoing" alert exists | Updates existing alert's `last_checked_timestamp` and `last_checked_value`. | +| - threshold *is not* exceeded
- "ongoing" alert exists | Expires alert.| +| - threshold *is not* exceeded
- "ongoing" does not exist | No action.| + +Next, the alert rule status is updated. Each alert rule will be updated with a new `last_checked_timestamp` and new `status`, if applicable. + +Finally, the alert notifications for newly-created or expired alerts will be sent for any rules that are not muted. + +{{< important >}} +If the [Metrics API]({{< relref "/controller/analytics/metrics/metrics-api.md" >}}) query does not return any data -- for example, if there was no traffic through the instance and therefore no metric value -- NGINX Controller assumes a value of `0`. In such cases, the threshold will be compared to `0`. +{{< /important >}} + +## Alert special cases + +### Alerts for the controller.agent.status metric + +The `controller.agent.status` is a special metric representing the heartbeat of the NGINX Agent running on the instance. +The metric is reported every 1 minute by the NGINX Agent to the NGINX Controller and may only have a value of 1 if the NGINX Agent is healthy. +If the NGINX Agent is unhealthy it is not reporting the heartbeat and effectively no values for the `controller.agent.status` are stored by the NGINX Controller. +Based on this metric it is possible to create an alert rule and receive notifications whenever the total number of heartbeats reported by a certain NGINX Agent in a recent period is below or equal (or above or equal) certain threshold. + +For example, you would like to receive notifications whenever the NGINX Agent availability at any instance is less or equal 70%. +To achieve that: + +1. Create an alert rule for the `controller.agent.status` metric. +2. Set the period to at least 10 minutes (recommended, to avoid flapping conditions). Heartbeats arrive every minute while the alert status is evaluated every 30 seconds. +3. Set the threshold to 7 of the NGINX Agent availability (7 heartbeats received in the last 10 min). +4. Set the operator to below or equal. +5. 
Break out by the instance dimension to get notified about the NGINX Agent availability per instance. + +## What's Next + +- [Create and Manage Alert Rules]({{< relref "/controller/analytics/alerts/manage-alerts.md" >}}) +- [Manage Registered Emails]({{< relref "/controller/analytics/alerts/manage-registered-emails.md" >}}) +- [NGINX Controller REST API Reference]({{< relref "/controller/api/_index.md" >}}) + +{{< versions "3.13" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/alerts/manage-alerts.md b/content/controller/analytics/alerts/manage-alerts.md new file mode 100644 index 000000000..746390e08 --- /dev/null +++ b/content/controller/analytics/alerts/manage-alerts.md @@ -0,0 +1,104 @@ +--- +description: Learn how to view, add, mute, and delete Alerts. +docs: DOCS-521 +doctypes: +- concept +tags: +- docs +title: Manage Alerts +toc: true +weight: 200 +--- + +## Overview + +[Alerts]({{< relref "/controller/analytics/alerts/about-alerts.md" >}}) are notifications about the F5 NGINX Controller system and your applications' performance. + +[Alert rules]({{< relref "/controller/analytics/alerts/about-alerts.md#alert-rules" >}}) let you specify what you want to be alerted about. This includes which metrics you want to monitor; the trigger conditions and threshold to meet; the instance(s) to monitor; and the email address(es) to use for notifications. + +## Add an Alert Rule + +To add an alert rule: + +1. Open the NGINX Controller user interface and log in. +1. On the Analytics menu, select **Alerts > Alert Rules**. +1. Select **Create Alert Rule**. +1. 
Define your alert rule by providing the following information: + + - Name + - (Optional) Display Name + - Metric + - Condition, Threshold, and Time Period + - Filter + - (Optional) Breakout + - Email Notification Address(es): + + - Select the desired address(es) from the list provided, or + - Select **Manage Email Addresses** to add a new address, then take the steps below: + + 1. Select **Add Email Address**. + 1. Provide the desired email address. + 1. Select the submit (plus sign) icon. + 1. Select **Done** to close the Manage Email Addresses panel. + + {{}}You will need to verify the email address before it can begin receiving alerts.{{}} + +1. (Optional) Select **Mute Alert Rule** if you want to create the alert rule but not receive any associated notifications. +1. Select **Create**. + +## View Alerts + +To view all **alerts** that are triggered by alert rules: + +1. Open the NGINX Controller user interface and log in. +1. On the Analytics menu, select **Alerts > Alerts**. + +All alert rules and triggered alerts are displayed on this page. You can use the search bar to filter the alerts that are displayed. + +## Edit an Alert Rule + +To edit an alert: + +1. Open the NGINX Controller user interface and log in. +1. On the Analytics menu, select **Alerts > Alert Rules**. +1. Select the alert rule that you want to edit. +1. Select the edit (pencil) icon for the alert rule. +1. Make the desired changes to the alert rule, then select **Save**. + +{{< important >}} +When you edit an alert rule, any ongoing alerts which previously met that rule will expire immediately. + +If the threshold is still exceeded in the new alert rule configuration, new alerts will be triggered. +{{< /important >}} + +## Mute or Unmute an Alert Rule + +If you want to stop receiving notifications for an alert rule without deleting it, you can mute it. Likewise, you can unmute alert rules for which you want to resume receiving notifications. + +To mute or unmute an alert: + +1. 
Open the NGINX Controller user interface and log in. +1. On the Analytics menu, select **Alerts > Alert Rules**. +1. Select the alert rule that you want to mute or unmute. +1. Select the mute (volume) icon to mute or unmute the alert rule. + +## Delete an Alert Rule + +To delete an alert rule: + +1. Open the NGINX Controller user interface and log in. +1. On the Analytics menu, select **Alerts > Alert Rules**. +1. Select the alert rule that you want to delete. +1. Select the delete (trash can) icon to delete the alert rule. +1. Select **Delete** in the pop-up box to confirm that you want to proceed. + +## What's Next + +- Learn more [About Alerts]({{< relref "/controller/analytics/alerts/about-alerts.md" >}}) +- Learn more about [Metrics and Metadata]({{< relref "/controller/analytics/metrics/overview-metrics-metadata.md" >}}) +- Learn more about [Traffic Metrics]({{< relref "/controller/analytics/metrics/overview-traffic-metrics.md" >}}) +- [Manage Registered Emails]({{< relref "/controller/analytics/alerts/manage-registered-emails.md" >}}) + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/alerts/manage-registered-emails.md b/content/controller/analytics/alerts/manage-registered-emails.md new file mode 100644 index 000000000..f21d815a6 --- /dev/null +++ b/content/controller/analytics/alerts/manage-registered-emails.md @@ -0,0 +1,71 @@ +--- +description: Learn how to manage the email addresses that receive automatic alert + notifications. +docs: DOCS-522 +doctypes: +- concept +tags: +- docs +title: Manage Registered Email Addresses +toc: true +weight: 310 +--- + +## Overview + +In order to receive email notifications for [Alerts]({{< relref "/controller/analytics/alerts/about-alerts.md" >}}), you need to provide a valid email address and complete the verification process. 
+ +{{< important >}} +You will not receive any alert notifications via email until you verify your email address. Any alert notification emails that were triggered by alert rules prior to the email address being verified will not be re-sent. +{{< /important >}} + +## List Registered Email Addresses + +To find the list of registered email addresses: + +1. Open the F5 NGINX Controller user interface and log in. +1. On the **Analytics** menu, select **Alerts**. +1. On the **Alert Rules Overview** page, select **Manage Email Addresses**. +1. All registered email addresses are displayed in the Manage Email Addresses panel. To close the panel, select **Done**. + +{{}}The **Manage Email Addresses** button is not displayed if you don't have any Alerts configured. If this is the case, you can add a new email address when you [create an alert rule]({{< relref "/controller/analytics/alerts/manage-alerts.md#add-an-alert-rule" >}}).{{}} + +## Add a New Email Address + +To add a new email address: + +1. Open the NGINX Controller user interface and log in. +1. On the **Analytics** menu, select **Alerts**. +1. On the **Alert Rules Overview** page, select **Manage Email Addresses**. +1. In the **Manage Email Addresses** panel: +1. Select **Add Email Address**. +1. Provide the desired email address. +1. Select the submit (plus sign) icon. +1. Select **Done** to close the Manage Email Addresses panel. +1. Check your email inbox for a message with the subject `[controller-team] Email verification`. +1. Click on the link provided in the email to complete the verification process. + +## Re-send a Verification Email + +To re-send a verification email to a newly-registered email address: + +1. Open the NGINX Controller user interface and log in. +1. On the **Analytics** menu, select **Alerts**. +1. On the **Alert Rules Overview** page, select **Manage Email Addresses**. +1. Select the Resend verification (circular arrows) icon to the right of the email address. +1. 
Select **Done** to close the Manage Email Addresses panel. + +## Remove a Registered Email Address + +To remove a registered email address: + +1. Open the NGINX Controller user interface and log in. +1. On the **Analytics** menu, select **Alerts**. +1. On the **Alert Rules Overview** page, select **Manage Email Addresses**. +1. On the **Manage Email Addresses** panel, select the Delete email address (trash can) icon to the right of the email address that you want to remove. +1. In the **Delete Email Address** pop-up window, select **Delete** to confirm. +1. Select **Done** to close the Manage Email Addresses panel. + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/alerts/service-now-notifications.md b/content/controller/analytics/alerts/service-now-notifications.md new file mode 100644 index 000000000..f14ff27cc --- /dev/null +++ b/content/controller/analytics/alerts/service-now-notifications.md @@ -0,0 +1,57 @@ +--- +description: Set up Alerts Integration with ServiceNow. Deprecated in v3.13. +docs: DOCS-523 +doctypes: +- task +tags: +- docs +- deprecated +title: ServiceNow Alerts Integration +toc: true +weight: 600 +--- + +## ServiceNow Alert Integration + +{{< deprecated >}} +**The ServiceNow Alert Integration is deprecated in F5 NGINX Controller v3.13.** +{{< /deprecated >}} + +The ServiceNow integration sends all notifications from NGINX Controller to the Incidents table in your ServiceNow account. Follow the steps below to set up the integration. + +1. Install Python3 on your machine. +2. In your ServiceNow instance, go to **System OAuth > Application Registry** and create a new OAuth API endpoint for external clients. + + Fill out the form and specify a long refresh token lifespan. Consider aligning the token lifespan with the expiry date of your NGINX Controller license. 
+ + {{< important >}} The ServiceNow integration will fail once the refresh token expires.{{< /important >}} + +3. Select the **Configure ServiceNow** button. In the prompt, provide the requested information for the ServiceNow client and select **Save**. + + - **ServiceNow Instance** - The instance ID for your ServiceNow account. + - **Client ID** - Client ID from ServiceNow (from Step 2). + - **Client Secret** - Client Secret from ServiceNow (from Step 2). + - **Username** - Your ServiceNow username; this is used to generate the access token and will not be stored. + - **Password** - Your ServiceNow password; this is used to generate the access token and will not be stored. + - **Controller host** - The URL of your NGINX Controller instance. + - **Controller email** - The email that you use to log in to Controller. + - **Controller password** - The password that you use to log in to Controller. + - **Controller port** - The port on which NGINX Controller is running; the default is 80. + - **Company name** - The name of your company; this is used to create the ServiceNow transport. +
+4. Watch Controller alerts come through as incidents in ServiceNow. + + Mapping of Controller Alerts to ServiceNow Priority: + + - ('alerts', 'created') → 1 + - ('alerts', 'cleared') → 3 + - ('agent', 'nginx_not_found') → 1 + - ('agent', 'nginx_config_parsing_error') → 1 + - ('ssl_expiration', 'ssl_cert_has_expired') → 1 + - ('ssl_expiration', 'ssl_cert_will_expire') → 2 + - ('agent', 'agent_version_old') → 2 + - ('agent', 'agent_version_obsoleted') → 1 + - ('group_actions', 'group_action_completed') → 3 + +{{< versions "3.0" "3.13" "ctrlvers" >}} + diff --git a/content/controller/analytics/catalogs/_index.md b/content/controller/analytics/catalogs/_index.md new file mode 100644 index 000000000..42f5a7ff3 --- /dev/null +++ b/content/controller/analytics/catalogs/_index.md @@ -0,0 +1,13 @@ +--- +aliases: +- /analytics/dimensions/reference/ +- /analytics/metrics/reference/ +description: Reference information for F5 NGINX Controller Catalogs. +menu: + docs: + parent: Analytics + weight: 20 +title: Catalogs Reference +weight: 210 +url: /nginx-controller/analytics/catalogs/ +--- diff --git a/content/controller/analytics/catalogs/dimensions.md b/content/controller/analytics/catalogs/dimensions.md new file mode 100644 index 000000000..dbf884d61 --- /dev/null +++ b/content/controller/analytics/catalogs/dimensions.md @@ -0,0 +1,19 @@ +--- +catalog: true +description: Information about all of the Dimensions collected by F5 NGINX Controller + Agent. 
+docs: DOCS-524 +doctypes: +- reference +tags: +- docs +title: NGINX Controller Dimensions Catalog +toc: false +weight: 20 +--- + +{{< dimensions path="/static/ctlr/catalogs/dimensions-catalog.json" >}} + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/catalogs/metrics.md b/content/controller/analytics/catalogs/metrics.md new file mode 100644 index 000000000..39e167b92 --- /dev/null +++ b/content/controller/analytics/catalogs/metrics.md @@ -0,0 +1,18 @@ +--- +catalog: true +description: Information about all of the Metrics collected by F5 NGINX Controller Agent. +docs: DOCS-525 +doctypes: +- reference +tags: +- docs +title: NGINX Controller Metrics Catalog +toc: false +weight: 20 +--- + +{{< metrics path="/static/ctlr/catalogs/metrics-catalog.json" >}} + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/dashboards/_index.md b/content/controller/analytics/dashboards/_index.md new file mode 100644 index 000000000..8b6ae2a58 --- /dev/null +++ b/content/controller/analytics/dashboards/_index.md @@ -0,0 +1,10 @@ +--- +description: Learn about F5 NGINX Controller Dashboards. +menu: + docs: + parent: Analytics + weight: 40 +title: Dashboards +weight: 120 +url: /nginx-controller/analytics/dashboards/ +--- diff --git a/content/controller/analytics/dashboards/application-health-score.md b/content/controller/analytics/dashboards/application-health-score.md new file mode 100644 index 000000000..135b78d2c --- /dev/null +++ b/content/controller/analytics/dashboards/application-health-score.md @@ -0,0 +1,95 @@ +--- +description: View and understand the Application Health Score for your application. 
+docs: DOCS-526 +doctypes: +- concept +tags: +- docs +title: Understanding the Application Health Score +toc: true +weight: 20 +--- + +## Overview + +When you log in to the F5 NGINX Controller user interface, you will see the **Analytics Dashboard Overview** page. This page contains an Application Health Score (AHS) that reflects your application's performance. + +The AHS is a customizable [Apdex-like numerical measure](https://www.apdex.org/) that can be used to estimate the quality of experience for your web application. It lets you configure service-level monitoring for your applications. + +You can select any combination of the following three service-level indicators (SLI) to create your AHS: + +- Successful requests (selected by default), +- (Optional) Request time (95th percentile), and +- (Optional) NGINX Controller Agent availability. + +Successful requests are determined according to the total observed average request time (P95) either below the low threshold (100% satisfying) or between the low and high threshold (partially satisfying). + +A simplified formula for AHS is as follows: + +`AHS = (Successful Requests %) * (Timely Requests %) * (Agent Availability %)` + +When you select the Request Time (95th percentile) for inclusion in the AHS, you can set two thresholds for the total observed average request time (P95): + +- Low threshold for satisfying requests. +- High threshold for partially satisfying requests. + +If the average request time (P95) for the selected time period is below the low threshold, this is considered as a "100% satisfying" state of requests. + +If the request time is above the low threshold and below the high threshold, a "satisfaction ratio" is calculated accordingly. +Requests above the high threshold are considered to be "0%", or "unsatisfying". + +For example: If the low threshold is 0.2s and the high threshold is 1s, a request time greater than 1s would be considered unsatisfying and the resulting score would be 0%. 
+ +The algorithm for calculating the AHS is as follows. Here, `T1` represents the low threshold and `T2` represents the high threshold. + +```nginx +successful_req_pct = (nginx.http.request.count - nginx.http.status.5xx) / nginx.http.request.count + +if (nginx.http.request.time.pctl95 < T1) + timely_req_pct = 1 +else + if (nginx.http.request.time.pctl95 < T2) + timely_req_pct = 1 - (nginx.http.request.time.pctl95 - T1) / (T2 - T1) + else + timely_req_pct = 0 + +m1 = successful_req_pct +m2 = timely_req_pct +m3 = agent_up_pct + +app_health_score = m1 * m2 * m3 +``` + +## Customize the Application Health Score + +Take the steps below to customize the Application Health Score (AHS) that displays on the Overview page. + +{{< note >}} +By default, the AHS and other metrics on the **Overview** page are calculated for all of the Instances monitored by the Controller Agent. +{{< /note >}} + +1. Open the NGINX Controller user interface and log in. +2. On the **Overview** page, select the Settings (gear) icon in the Application Health Score panel. +3. In the **Service Level Monitoring** window, define the following: + + - (Optional) Create a custom name for the monitor (replaces "Application Health Score"). + - (Optional) Select tags to narrow the data source(s) to a specific Instance or set of Instances. + - Select the Service Indicators that you want to include in the score calculation. + + - Successful requests (selected by default). + - Request time (95th percentile): Set a low threshold and a high threshold, in seconds. + - Agent availability. + +4. Select **Save**. 
+ +## What's Next + +- [Overview of metrics and metadata]({{< relref "/controller/analytics/metrics/overview-metrics-metadata.md" >}}) +- [Set up Metrics Collection]({{< relref "/controller/admin-guides/config-agent/configure-metrics-collection.md" >}}) +- [Metrics Catalog Reference]({{< relref "/controller/analytics/catalogs/metrics.md" >}}) +- [Dimensions Catalog Reference]({{< relref "/controller/analytics/catalogs/dimensions.md" >}}) +- [Custom Dashboards]({{< relref "/controller/analytics/dashboards/custom-dashboards.md" >}}) + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/dashboards/custom-dashboards.md b/content/controller/analytics/dashboards/custom-dashboards.md new file mode 100644 index 000000000..de2458940 --- /dev/null +++ b/content/controller/analytics/dashboards/custom-dashboards.md @@ -0,0 +1,141 @@ +--- +description: Create custom dashboards to view custom graphs. +docs: DOCS-527 +doctypes: +- task +tags: +- docs +title: Create Custom Dashboards +toc: true +weight: 20 +--- + +## Overview + +You can use the F5 NGINX Controller user interface to create your own Dashboards populated with customizable graphs of NGINX and system-level metrics. + +{{< note >}} + +- You can add up to 30 Elements to Dashboard. +- Dashboards are accessible by all Users. + +{{< /note >}} + +## Before You Begin + +- [Install the NGINX Controller Agent on instances that you want to monitor]({{< relref "/controller/admin-guides/install/install-nginx-controller-agent.md" >}}) +- [Configure Metrics collection on your NGINX instances]({{< relref "/controller/admin-guides/config-agent/configure-metrics-collection.md" >}}) + +## Dashboards + +In NGINX Controller, you can create dashboards to display custom graphs. 
Some use cases for custom graphs include the following: + +- Checking NGINX performance for a particular application or microservice, for example, based on the URI path +- Displaying metrics per virtual server +- Visualizing the performance of a group of NGINX servers, for example, front-end load-balancers or an NGINX edge caching layer +- Analyzing a detailed breakdown of HTTP status codes per application + +When building a custom graph, metrics can be summed or averaged across NGINX servers. By using metric filters, it is possible to create additional "metric dimensions", for example, reporting the number of POST requests for a specific URI. + + {{< note >}} + +The functionality of user-defined dashboards recently changed in NGINX Controller. Some of the functionalities that were present in the +previous version might not be currently present or work differently. Your old dashboards were not migrated to the new version. + + {{< /note >}} + +## Create a Custom Dashboard + +To create a custom dashboard: + +1. Open the NGINX Controller user interface and log in. +2. The first page you will see is the **Analytics Overview** page. +3. Select the Dashboards tab to see the **My Dashboards** list page. +4. To create a new dashboard - use **Create** button and provide required information. + +### Add a Dashboard Element + +To add an Element to a Dashboard: + +1. Create a new Dashboard or select the edit icon to edit an existing Dashboard. +2. Select **Add element** button. +3. Provide a title. +4. (Optional) Enter a description of the Element. +5. Select the type of Element to add: + + - **Line chart** displays data for a specific time period + - **Stat** displays the metric's most recent value + +6. Select a metric from the drop-down menu. +7. Select the aggregation method for the selected metric. 
+ {{< see-also >}} +For more information about metrics and supported aggregation methods, see the [Metrics Catalog Reference]({{< relref "/controller/analytics/catalogs/metrics.md" >}}). + {{< /see-also >}} +8. (Optional) Add a filter to refine the data. For example, you can limit the data to a specific App or Environment. +9. (Optional) Select **Add metrics** to add more metrics. + {{< note >}} +Additional metrics can only be added to a **Line chart** Element. + {{< /note >}} +10. (Optional) Select the **Override Default Time Settings** option to select a time range for the Element. + + - The default time range is the last seven days. + - You can select a new pre-defined time range or select **Custom time range** to define a new time range. + +11. Select **Create** or **Edit** to save your Element settings. + +## Filter Metrics + +You can use the filtering functionality for NGINX metrics. If you select **Add filter**, you can add multiple criteria to define specific "metric dimensions". + +The filter consists of one or more expressions in a form of: + +`dimensionName operator value` + +where: + +- `dimensionName` is a name of the dimension from the dimensions catalog +- `operator` is a comparison rule (equality, likeliness, etc.) +- `value` is a value to which we want compare the dimensions value + +Filters can be used in conjunction using `AND` or `OR` logical operators. There is no possibility of nesting these expressions. + +Filters are used to narrow down the data set presented on the chart/stat. For example, you may not want to display the data for all of your applications, but only for the particular one. + +## Limitations + +- You are not able to add more than 30 elements to the single dashboard. +- All dashboards are accessible for all users. +- Dashboards defined in the old custom dashboards view are not migrated to the new dashboards view. 
+ +## Clone a Custom Dashboard + +To clone an existing dashboard from the Dashboards page, select the **Clone** icon on a dashboard's row, or select **Clone** from the toolbar above the table (you need to select a dashboard first). + +You can also clone a dashboard from the elements view using the **Clone Dashboard** button. This button is not available in "edit" mode, so make sure you finish editing a dashboard before cloning it. + +When you clone a dashboard, the new one will have the same display name as the original dashboard + the current date. For example, if you clone the "My system graphs" dashboard, the cloned dashboard's display name will be something like "My system graphs Aug 24, 2021, 14:37:32". You can change the display name later on the Edit Config page. + +## Predefined Dashboards + +You can find predefined dashboards on the Dashboards page. Predefined dashboards have a special "Read Only" tag, include elements to show the most common metrics, and cover some common cases. The predefined dashboards might be helpful in learning how custom dashboards work. You can clone any of the predefined dashboards and then modify them as needed. + +Predefined dashboards cannot be deleted or modified. + +{{< note >}} + +- Predefined dashboards were introduced in NGINX Controller 3.21. +- If you already have custom dashboards, the predefined ones should appear at the end of the list when default sorting is applied. 
+ +{{< /note >}} + +## What's Next + +- [Overview Dashboard]({{< relref "/controller/analytics/dashboards/overview-dashboard.md" >}}) +- [Overview of Metrics and Metadata]({{< relref "/controller/analytics/metrics/overview-metrics-metadata.md" >}}) +- [Set up Metrics Collection]({{< relref "/controller/admin-guides/config-agent/configure-metrics-collection.md" >}}) +- [Metrics Catalog Reference]({{< relref "/controller/analytics/catalogs/metrics.md" >}}) +- [Dimensions Catalog Reference]({{< relref "/controller/analytics/catalogs/dimensions.md" >}}) + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/dashboards/overview-dashboard.md b/content/controller/analytics/dashboards/overview-dashboard.md new file mode 100644 index 000000000..fa1467bd3 --- /dev/null +++ b/content/controller/analytics/dashboards/overview-dashboard.md @@ -0,0 +1,67 @@ +--- +description: Learn about the Dashboards that displays cumulative metrics for your + NGINX Instances. +docs: DOCS-528 +doctypes: +- task +tags: +- docs +title: Analytics Overview +toc: true +weight: 10 +--- + +## Overview + +The **Analytics Dashboards** provides an at-a-glance summary of the state of your F5 NGINX infrastructure and your application performance. + +## Before You Begin + +- [Install the NGINX Controller Agent on Instances that you want to monitor]({{< relref "/controller/admin-guides/install/install-nginx-controller-agent.md" >}}) + +## Overview Dashboard + +When you log in to the NGINX Controller user interface, the **Analytics Overview** page displays first by default. Select the Dashboards tab to see the **My Dashboards** list page. On the **Dashboard Overview** page, you can view the key indicators noted below. By default, the graphs display metrics for the last hour. 
You can select any of the default time periods -- one hour, four hours, one day, two days, or one week -- to get a better idea of your apps' overall health and performance. To view metrics over longer time periods, you can create a [custom dashboard]({{< relref "/controller/analytics/dashboards/custom-dashboards.md" >}}). + +The cumulative [metrics]({{< relref "/controller/analytics/metrics/overview-metrics-metadata.md" >}}) displayed on the **Analytics Overview** page are: + +### System Metrics + +- [Application Health Score]({{< relref "/controller/analytics/dashboards/application-health-score.md" >}}): the health score for your application. +- Average CPU: 100 - AVG of the system.cpu.idle (CPU spent in an idle state) +- Average Memory: AVG of the `system.mem.used` metric + +### Application Metrics + +- Time to First Byte: AVG of the `client.ttfb.latency.max` metric +- Bytes In/s (Bytes In per second): RATE of the `http.request.bytes_rcvd` metric +- Bytes Out/s (Bytes Out per second): RATE of the `http.request.bytes_sent` metric + +- Total Requests: SUM of the `nginx.http.request.count` metric. +- HTTP 5XX Errors: SUM of the `nginx.http.status.5xx` metric. +- HTTP 4XX Errors: SUM of the `nginx.http.status.4xx` metric. +- Request time (P95): AVG of the `nginx.http.request.time.pctl95` metric. + +- Avg Client Response Latency: AVG of the `client.response.latency.max` metric +- Avg Upstream Response Latency: AVG of the `upstream.response.latency.max` metric +- Avg Client Network Latency: AVG of the `client.network.latency.max` metric. + +{{< note >}} + +By default, the metrics are calculated for **all** of your Controller Agent-monitored Instances. + +To display metrics for a specific set of hosts (for example, only for "production"), select the gear icon on the Application Health Score panel, then add a tag or tags by which you want to filter the results. 
+ +{{< /note >}} + +## What's Next + +- [Overview of metrics and metadata]({{< relref "/controller/analytics/metrics/overview-metrics-metadata.md" >}}) +- [Metrics Catalog Reference]({{< relref "/controller/analytics/catalogs/metrics.md" >}}) +- [Dimensions Catalog Reference]({{< relref "/controller/analytics/catalogs/dimensions.md" >}}) +- [Application Health Score]({{< relref "/controller/analytics/dashboards/application-health-score.md" >}}) +- [Custom Dashboards]({{< relref "/controller/analytics/dashboards/custom-dashboards.md" >}}) + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/data-explorer/_index.md b/content/controller/analytics/data-explorer/_index.md new file mode 100644 index 000000000..59a5fefb5 --- /dev/null +++ b/content/controller/analytics/data-explorer/_index.md @@ -0,0 +1,10 @@ +--- +description: Learn about F5 NGINX Controller Data Explorer. +menu: + docs: + parent: Analytics + weight: 40 +title: Data Explorer +weight: 120 +url: /nginx-controller/analytics/data-explorer/ +--- diff --git a/content/controller/analytics/data-explorer/how-to-use.md b/content/controller/analytics/data-explorer/how-to-use.md new file mode 100644 index 000000000..6c77eb329 --- /dev/null +++ b/content/controller/analytics/data-explorer/how-to-use.md @@ -0,0 +1,150 @@ +--- +description: Use the Data Explorer to examine the metrics that F5 NGINX Controller collects. +docs: DOCS-529 +doctypes: +- task +tags: +- docs +title: How To Use the Data Explorer +toc: true +weight: 20 +--- + +## Overview + +This topic explains how to use the Data Explorer to view the metrics that F5 NGINX Controller collects. 
+ +The Data Explorer lets you perform these following tasks: + +- Easily switch between contexts, metrics, and dimensions +- Specify a time range of interest +- Set the aggregation mode +- Compare results to previous periods +- Export the query that's used to generate the charts as a URL, which you can use outside of NGINX Controller + +  + +## Select the Context + +To get started with the Data Explorer, you need to select the context for the data you want to view. + +1. Open the NGINX Controller user interface and log in. +1. Select the NGINX Controller menu icon, then select **Analytics > Explorer**. +1. On the Data Explorer detail page, select a context area -- **Instances**, **Environments**, **Gateways**, or **Apps** -- for which you want to view data. + +{{< note >}} +When you access the Data Explorer from other areas of the browser interface, the context is already defined. So, for example, if you select Data Explorer from within the Instances module (**Infrastructure > Instances > Data Explorer**), the data for your instances is displayed. When you switch between contexts, the metrics options, such as `system.cpu.idle` or `system.load.5`, are updated. +{{< /note >}} + +  + +## Select a Resource + +When you [select the context](#select-the-context) for the Data Explorer, a list of related resources is shown. If there aren't any related resources for the selected context, you'll see the message "No Data" on the Data Explorer detail page. + +{{< note >}} + +If you don't see a resource in the list, but you expect it to be there, check the [selected metric](#metrics) and the [selected time range](#time-range). When a resource doesn't have the data for the [selected time range](#time-range) it won't be added to the resources list. + +{{< /note >}} + +To view data for a resource, select the resource's name from the resource list. 
+
+{{< img src="/controller/analytics/data-explorer/images/data-explorer_resource.png">}}
+
+## Metrics
+
+The [list of metrics]({{< relref "/controller/analytics/catalogs/metrics.md" >}}) is sorted alphabetically, and you can use the search feature to filter the list. As previously mentioned, the list of metrics depends on the context you've selected for the Data Explorer. For example, if you've chosen Instances for the context, then the list of metrics will be for instances.
+
+{{< img src="/controller/analytics/data-explorer/images/data-explorer_metric.png">}}
+
+When the selected metric changes, the **Aggregation** and **Group By** selectors are updated correspondingly (as well as the [list of resources](#select-a-resource) and the [Dimensions panel](#dimensions-panel)). Some metrics have different lists of **Aggregation** and **Group By** values. For example, the `http.response_code` dimension, which is a valid **Group By** value for the `http.request.count` metric, is not available for the `nginx.workers.cpu.user` metric because these items are from different contexts and aren't related to each other.
+
+## Aggregation Mode
+
+Use the Aggregation selector -- the Σ symbol with possible values of `AVG`, `MAX`, `MIN`, `RATE`, and `SUM` -- to [aggregate the data]({{< relref "/controller/analytics/metrics/metrics-api.md#aggregations" >}}). The list of possible aggregation values depends on the metric that's selected.
+
+{{< img src="/controller/analytics/data-explorer/images/data-explorer_aggregation.png">}}
+
+## Group by Dimension
+
+Use the **Group By** selector to [group the data by a chosen dimension]({{< relref "/controller/analytics/metrics/metrics-api.md#groupby" >}}).
+
+In the following example image, the data for the `bytes_rcvd` metric is grouped by the dimension `http.request_method`, which displays a data series for the HTTP methods `DELETE`, `GET`, `LINK`, and so on. 
+ +{{< img src="/controller/analytics/data-explorer/images/data-explorer_group-by.png">}} + +When a **Group By** selection is applied, the chart displays a top-10 data series. For example, let's say you want to check disk usage, so you select the metric `system.disk.total` and `file_path` as the dimension to group by. The chart would then display the top-10 mount points with the highest values. If you have more than 10 mount points, you'll see the top-10 mount points plus an 11th data series that's an aggregation of the rest of the data using the same selection criteria. In other words, you'll see a chart of the 10 most used mount points plus a chart of all the other mount points aggregated into one data series. When a **Group By** dimension is selected, and there are more than 10 dimensions, the 11th data series is named "Other." + +{{< note >}} When MIN is selected as the aggregation method, the top-10 series are sorted ascending, lowest-to-highest. For all of the other aggregation methods, the top-10 values are sorted descending, highest-to-lowest. {{< /note >}} + +  + +## Time Range + +Use the time range selector above the chart to select the time range you want to examine. You can specify a custom time range if the predefined options aren't what you need. + +The granularity of the data series is based on the selected time range and can vary from 30 seconds to five days to make the chart easier to read. + +When you change the time range, the [list of resources](#select-a-resource) is updated correspondingly and it only includes the resources which have the data for the selected time range. + +## Compare To + +Next to the [time range](#time-range) selector, you'll find the `Compare To` list of options. This list allows you to compare data for the selected time frame with data from an earlier period. For example, you may want to view CPU usage for the last hour and compare the results to the same time from yesterday, last week, or even the previous year. 
+
+{{< img src="/controller/analytics/data-explorer/images/data-explorer_comparison.png">}}
+
+{{< note >}}
+
+- When comparison is turned on for a data series, the data series have the suffix "Compare" in their names.
+- If there is no data available for a comparison period, the comparison data series is not shown.
+- When a Group By dimension is applied, data comparisons are made only with the top-10 data series and not with the "Other" series, if there is one. See the [Group By](#group-by-dimension) section for a discussion of the top-10 and "Other" series.
+{{< /note >}}
+
+&nbsp;
+
+## Show Query
+
+On the Data Explorer details page, you can select the **Show Query** button (eye icon) to view the URL that's used to query the Metrics API to get the data you see in the chart. If you copy the query and use it outside of NGINX Controller, you'll get the same data but in JSON format.
+
+The query updates whenever the selection options change. The query doesn't include requests for comparison data.
+
+{{< see-also >}}
+For instructions on how to understand the Metrics API response, refer to the topic [Using the Metrics API]({{< relref "/controller/analytics/metrics/metrics-api#understanding-the-metrics-api-response" >}}).
+{{< /see-also >}}
+
+&nbsp;
+
+## Dimensions panel
+
+On the right of the screen there is a panel with the list of dimensions available for the [selected metric](#metrics).
+
+{{< img src="/controller/analytics/data-explorer/images/data-explorer_dimensions-drawer.png">}}
+
+Each dimension is presented as a section in which you can expand and see the values for it. The values are aggregated with the [selected aggregation method](#aggregation-mode) for the [selected time range](#time-range). 
They depend on the following selected parameters:
+
+- [context](#select-the-context)
+- [resource](#select-a-resource)
+- [metric](#metrics)
+- [aggregation](#aggregation-mode)
+- [time range](#time-range)
+
+When one of the parameters changes, you'll see the values for expanded dimensions are also updated.
+
+You can see only the top-10 values for each dimension, and based on the [selected aggregation](#aggregation-mode), they are sorted in the following ways:
+
+- When MIN is selected as the aggregation method, the top-10 series are sorted ascending, lowest-to-highest.
+- For all of the other aggregation methods, the top-10 values are sorted descending, highest-to-lowest.
+
+{{< note >}}
+
+- When the selected metric changes, the list of dimensions may change as well, and some of the dimensions you recently explored may disappear from the panel.
+- This panel was added in NGINX Controller v3.18.
+
+{{< /note >}}
+
+&nbsp;
+
+{{< versions "3.17" "latest" "ctrlvers" >}}
+{{< versions "3.18" "latest" "apimvers" >}}
+{{< versions "3.20" "latest" "adcvers" >}}
diff --git a/content/controller/analytics/data-explorer/images/data-explorer_aggregation.png b/content/controller/analytics/data-explorer/images/data-explorer_aggregation.png
new file mode 100644
index 000000000..9b529cd8e
Binary files /dev/null and b/content/controller/analytics/data-explorer/images/data-explorer_aggregation.png differ
diff --git a/content/controller/analytics/data-explorer/images/data-explorer_comparison.png b/content/controller/analytics/data-explorer/images/data-explorer_comparison.png
new file mode 100644
index 000000000..b9fe2ca29
Binary files /dev/null and b/content/controller/analytics/data-explorer/images/data-explorer_comparison.png differ
diff --git a/content/controller/analytics/data-explorer/images/data-explorer_dimensions-drawer.png b/content/controller/analytics/data-explorer/images/data-explorer_dimensions-drawer.png
new file mode 100644
index 000000000..e592fc3a5
Binary files /dev/null and 
b/content/controller/analytics/data-explorer/images/data-explorer_dimensions-drawer.png differ diff --git a/content/controller/analytics/data-explorer/images/data-explorer_group-by.png b/content/controller/analytics/data-explorer/images/data-explorer_group-by.png new file mode 100644 index 000000000..ed54fcac6 Binary files /dev/null and b/content/controller/analytics/data-explorer/images/data-explorer_group-by.png differ diff --git a/content/controller/analytics/data-explorer/images/data-explorer_main-view.png b/content/controller/analytics/data-explorer/images/data-explorer_main-view.png new file mode 100644 index 000000000..bdac1ddd7 Binary files /dev/null and b/content/controller/analytics/data-explorer/images/data-explorer_main-view.png differ diff --git a/content/controller/analytics/data-explorer/images/data-explorer_metric.png b/content/controller/analytics/data-explorer/images/data-explorer_metric.png new file mode 100644 index 000000000..abd160e34 Binary files /dev/null and b/content/controller/analytics/data-explorer/images/data-explorer_metric.png differ diff --git a/content/controller/analytics/data-explorer/images/data-explorer_resource.png b/content/controller/analytics/data-explorer/images/data-explorer_resource.png new file mode 100644 index 000000000..1c2781260 Binary files /dev/null and b/content/controller/analytics/data-explorer/images/data-explorer_resource.png differ diff --git a/content/controller/analytics/events/_index.md b/content/controller/analytics/events/_index.md new file mode 100644 index 000000000..0dff2641d --- /dev/null +++ b/content/controller/analytics/events/_index.md @@ -0,0 +1,10 @@ +--- +description: View system events and audit logs. 
+menu: + docs: + parent: Analytics + weight: 30 +title: Events +weight: 140 +url: /nginx-controller/analytics/events/ +--- diff --git a/content/controller/analytics/events/view-events.md b/content/controller/analytics/events/view-events.md new file mode 100644 index 000000000..6e28bfb8c --- /dev/null +++ b/content/controller/analytics/events/view-events.md @@ -0,0 +1,48 @@ +--- +description: View the audit log of system and user actions. +docs: DOCS-530 +doctypes: +- task +tags: +- docs +title: View Events +toc: true +weight: 20 +--- + +## Overview + +The Events page shows a log of the system and user actions for F5 NGINX Controller. The logs are organized by event categories and levels, making it easy to identify and review issues. + +## View Events + +Take the steps below to view NGINX Controller events: + +1. Open the NGINX Controller user interface and log in. +1. On the Analytics menu, select **Events**. +1. To view additional information about a particular event, select the event from the list to open the details pane. + +You can filter the events by typing a keyword in the search box and/or by selecting a time period. You can filter the results further by [Event Categories](#event-categories) or [Event Levels](#event-levels). + +## Event Categories + +You can select from the following Event Categories: + +- Agent Events; +- Agent Status Events; +- Controller Events; +- Audit Events -- a log of all actions performed by NGINX Controller users; +- Forwarder Notifications -- events emitted by [Data Forwarders]({{< relref "/controller/analytics/forwarders/_index.md" >}}) +- Workload Health Events -- events emitted by the Controller Agent when the health of an upstream server changes; + +To view the logs for a specific category, select the category name from the **Event Categories** list. + +## Event Levels + +Event levels sort events according to their information level: Debug, Info, Error, Warning, and Critical. 
+ +To view the logs for a specific level, select the level name from the **Event Levels** list. + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/forwarders/_index.md b/content/controller/analytics/forwarders/_index.md new file mode 100644 index 000000000..77ce17e24 --- /dev/null +++ b/content/controller/analytics/forwarders/_index.md @@ -0,0 +1,10 @@ +--- +description: Learn how to forward data from F5 NGINX Controller to external services. +menu: + docs: + parent: Analytics + weight: 40 +title: Data Forwarders +weight: 130 +url: /nginx-controller/analytics/forwarders/ +--- diff --git a/content/controller/analytics/forwarders/forward-analytics-to-datadog.md b/content/controller/analytics/forwarders/forward-analytics-to-datadog.md new file mode 100644 index 000000000..098d33703 --- /dev/null +++ b/content/controller/analytics/forwarders/forward-analytics-to-datadog.md @@ -0,0 +1,77 @@ +--- +description: How to forward Analytics data to Datadog. +docs: DOCS-531 +doctypes: +- tutorial +tags: +- docs +title: Forward Analytics Data to Datadog +toc: true +weight: 100 +--- + +## Overview + +Follow the steps in this guide to set up an F5 NGINX Controller Integration that forwards data to [Datadog](https://www.datadoghq.com/). + +## Before You Begin + +This guide assumes that you are already an active Datadog user. If you haven't already done so, you will need to [install and configure Datadog](https://docs.datadoghq.com/) before you proceed. + +You will also need to [Create an Integration]({{< relref "/controller/platform/integrations/datadog-integration.md" >}}) for your Datadog forwarder. + +## Create a Forwarder + +Take the following steps to create a Forwarder for Datadog: + +1. Open the NGINX Controller user interface and log in. +2. Select the NGINX Controller menu icon, then select **Platform**. +3. 
On the **Platform** menu, select **Data Forwarders**.
+4. On the **Data Forwarders** menu, select the **Create Data Forwarder** quick action.
+5. Add a name.
+6. (Optional) Add a display name.
+7. (Optional) Add a description.
+8. Select your **Integration Reference** from the dropdown menu or select **Create New** to create a new Integration.
+9. In the **Collector Type** list, select `DATADOG`.
+10. In the **Source** list, select the type of data to forward: `metrics` or `events`.
+11. In the **Output Format** list, select `DATADOG`.
+12. The **Selector** field consists of the following query parameters (optional):
+
+- `names` (inapplicable for `EVENTS`): The list of metrics names that you want to forward.
+- `excluded_names` (inapplicable for `EVENTS`): The list of metric names that you don't want to forward.
+- `filter`: The conditions to use to refine the metrics or events data.
+- Example usage when selecting metrics: `"names=nginx.*&excluded_names=nginx.upstream.*&filter=app='myapp'"`
+- Example usage when selecting events: `"filter=type='security violation' AND app='my-app'"`
+
+13. (Optional) Add additional **Streams** as required using the **Add Stream** button.
+
+{{< important >}}
+
+Each metric will be prefixed with a common namespace -- such as "nginx-controller" -- before it is sent to Datadog. This prefix is used by Datadog only and is not applied to any of the internal NGINX Controller metrics. Refer to the [metrics catalog]({{< relref "/controller/analytics/catalogs/metrics.md" >}}) for the full list of valid metric names.
+
+For events, the "nginx-controller" namespace is added to the ["ddsource" key](https://docs.datadoghq.com/api/v1/logs/#send-logs).
+
+{{< /important >}}
+
+NGINX Controller events are sent to Datadog as logs and NGINX Controller dimensions are sent as tags. 
The Forwarder converts the dimension data to comply with the Datadog [tags format](https://docs.datadoghq.com/getting_started/tagging/#defining-tags) prior to forwarding it. In some cases, the original dimension value may be transformed to fit the tag requirements. This includes replacing comma characters (`,`) with semicolons (`;`) to ensure that Datadog will properly handle the incoming payload. + +{{< see-also >}} + +See the [NGINX Controller Metrics]({{< relref "/controller/analytics/metrics/_index.md" >}}) docs for more information. + +{{< /see-also >}} + +## Verification + +Soon after you create the Datadog forwarder, you can view the selected metrics in Datadog. + +1. Log into the [Datadog web interface](https://app.datadoghq.com/). +2. On the navigation menu, select **Metrics** > **Summary**. + +## What's Next + +- Refer to [Troubleshooting Forwaders]({{< relref "/controller/support/troubleshooting-forwarders.md" >}}) for tips on resolving common issues. + +{{< versions "3.8" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/forwarders/forward-analytics-to-otlp.md b/content/controller/analytics/forwarders/forward-analytics-to-otlp.md new file mode 100644 index 000000000..d1aa43915 --- /dev/null +++ b/content/controller/analytics/forwarders/forward-analytics-to-otlp.md @@ -0,0 +1,67 @@ +--- +description: How to forward Analytics Metrics to OpenTelemetry Collector. +docs: DOCS-532 +doctypes: +- tutorial +tags: +- docs +title: Forward Analytics Metrics to OpenTelemetry Collector +toc: true +weight: 201 +--- + +## Overview + +Follow the steps in this guide to set up an F5 NGINX Controller integration that forwards metrics to OpenTelemetry Collector. + +## Before You Begin + +This guide assumes that you already have a working instance of any OpenTelemetry Collector. 
+ +You will also need to [Create an Integration]({{< relref "/controller/platform/integrations/otlp-integration.md" >}}) for your OpenTelemetry Collector forwarder. + +## Create a Forwarder + +Take the following steps to create a forwarder for OpenTelemetry Collector: + +1. Open the NGINX Controller user interface and log in. +2. Select the NGINX Controller menu icon, then select **Platform**. +3. On the **Platform** menu, select **Data Forwarders**. +4. On the **Data Forwarders** menu, select **Create Data Forwarder**. +5. Add a name. +6. (Optional) Add a display name. +7. (Optional) Add a description. +8. Select your **Integration Reference** from the dropdown list, or select **Create New** to create a new integration. +9. In the **Collector Type** list, select `OTLP_HTTP` or `OTLP_GRPC`. +10. In the **Source** list, select the type of data to forward: `METRICS`. +11. In the **Output Format** list, select `OTLP`. +12. The **Selector** field consists of the following query parameters (optional): + +- `names`: The list of metrics names that you want to forward. +- `excluded_names`: The list of metric names that you don't want to forward. +- `filter`: The conditions to use to refine the metrics data. +- Example usage when selecting metrics: `"names=nginx.*&excluded_names=nginx.upstream.*&filter=app='myapp'"` + +13. (Optional) Select **Add Stream** to add additional streams, as needed. + +{{< important >}} + +Each metric is prefixed with a common namespace -- for example, "nginx-controller" -- before it's sent to OpenTelemetry Collector. This prefix is used only by OpenTelemetry Collector and is not applied to any internal NGINX Controller metrics. Refer to the [metrics catalog]({{< relref "/controller/analytics/catalogs/metrics.md" >}}) for the full list of valid metric names. + +We have tested compatability with OTLP collector v0.33.0. We will most likely support versions higher than this, assuming backwards compatability from OTLP. 
+ +{{< /important >}} + +{{< see-also >}} + +See the [NGINX Controller Metrics]({{< relref "/controller/analytics/metrics/_index.md" >}}) docs for more information. + +{{< /see-also >}} + +## What's Next + +- Refer to [Troubleshooting Forwaders]({{< relref "/controller/support/troubleshooting-forwarders.md" >}}) for tips on resolving common issues. + +{{< versions "3.16" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/forwarders/forward-analytics-to-splunk.md b/content/controller/analytics/forwarders/forward-analytics-to-splunk.md new file mode 100644 index 000000000..3c94a73e6 --- /dev/null +++ b/content/controller/analytics/forwarders/forward-analytics-to-splunk.md @@ -0,0 +1,68 @@ +--- +description: How to forward Analytics data to Splunk. +docs: DOCS-533 +doctypes: +- tutorial +tags: +- docs +title: Forward Analytics Data to Splunk +toc: true +weight: 200 +--- + +## Overview + +Follow the steps in this guide to set up an F5 NGINX Controller Integration that forwards data to [Splunk](https://www.splunk.com/). + +## Before You Begin + +This guide assumes that you are already an active Splunk user. If you haven't already done so, you will need to [install and configure Splunk](https://docs.splunk.com/Documentation) before you proceed. + +You will also need to [Create an Integration]({{< relref "/controller/platform/integrations/splunk-integration.md" >}}) for your Splunk forwarder. + +## Create a Forwarder + +Take the following steps to create a Forwarder for Splunk: + +1. Open the NGINX Controller user interface and log in. +1. Select the NGINX Controller menu icon, then select **Platform**. +1. On the **Platform** menu, select **Data Forwarders**. +1. On the **Data Forwarders** menu, select the **Create Data Forwarder** quick action. +1. Add a name. +1. (Optional) Add a display name. +1. (Optional) Add a description. +1. 
Select your **Integration Reference** from the dropdown menu or select **Create New** to create a new Integration.
+1. In the **Collector Type** list, select `SPLUNK`.
+1. In the **Source** list, select the type of data to forward: `metrics` or `events`.
+1. In the **Output Format** list, select `SPLUNK`.
+1. The **Selector** field consists of the following query parameters (optional):
+
+   - `names` (inapplicable for `EVENTS`): The list of metrics names that you want to forward.
+   - `excluded_names` (inapplicable for `EVENTS`): The list of metric names that you don't want to forward.
+   - `filter`: The conditions to use to refine the metrics or events data.
+   - Example usage when selecting metrics: `"names=nginx.*&excluded_names=nginx.upstream.*&filter=app='myapp'"`
+   - Example usage when selecting events: `"filter=type='security violation' AND app='my-app'"`
+
+1. (Optional) Add additional **Streams** as required using the **Add Stream** button.
+
+{{< important >}}
+
+Each metric will be prefixed with a common namespace -- such as `nginx-controller` -- before it is sent to Splunk. This prefix is used by Splunk only and is not applied to any of the internal NGINX Controller metrics. Refer to the [metrics catalog]({{< relref "/controller/analytics/catalogs/metrics.md" >}}) for the full list of valid metric names.
+
+For events, the "nginx-controller" namespace will be placed in the ["source" key](https://docs.splunk.com/Documentation/Splunk/8.1.1/Data/FormateventsforHTTPEventCollector#Event_metadata) and sent with each event.
+
+{{< /important >}}
+
+{{< see-also >}}
+
+See the [NGINX Controller Metrics]({{< relref "/controller/analytics/metrics/_index.md" >}}) docs for more information.
+
+{{< /see-also >}}
+
+## What's Next
+
+- Refer to [Troubleshooting Forwarders]({{< relref "/controller/support/troubleshooting-forwarders.md" >}}) for tips on resolving common issues. 
+ +{{< versions "3.6" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/forwarders/forward-analytics-to-syslog.md b/content/controller/analytics/forwarders/forward-analytics-to-syslog.md new file mode 100644 index 000000000..018a31c30 --- /dev/null +++ b/content/controller/analytics/forwarders/forward-analytics-to-syslog.md @@ -0,0 +1,53 @@ +--- +description: How to forward Analytics Events to Syslog. +docs: DOCS-534 +doctypes: +- tutorial +tags: +- docs +title: Forward Analytics Events to Syslog +toc: true +weight: 201 +--- + +## Overview + +Follow the steps in this guide to set up a F5 NGINX Controller Integration that forwards events to a syslog server. + +## Before You Begin + +This guide assumes that you already have a working instance of any syslog server. + +If you haven't already done so, you can use an open-source version of [Syslog-NG](https://www.syslog-ng.com/products/open-source-log-management/). + +You will also need to [Create an Integration]({{< relref "/controller/platform/integrations/syslog-integration.md" >}}) for your Syslog forwarder. + +## Create a Forwarder + +Take the following steps to create a Forwarder for Splunk: + +1. Open the NGINX Controller user interface and log in. +1. Select the NGINX Controller menu icon, then select **Platform**. +1. On the **Platform** menu, select **Data Forwarders**. +1. On the **Data Forwarders** menu, select the **Create Data Forwarder** quick action. +1. Add a name. +1. (Optional) Add a display name. +1. (Optional) Add a description. +1. Select your **Integration Reference** from the dropdown menu or select **Create New** to create a new Integration. +1. In the **Collector Type** list, select `SYSLOG`. +1. In the **Source** list, select the type of data to forward: `events`. NGINX Controller can forward only `EVENTS` data to syslog. +1. In the **Output Format** list, select `SYSLOG`. +1. 
The **Selector** field consists of the following query parameters (optional): + + - `filter`: The conditions to use to refine the metrics or events data. + - Example usage: `"filter=type='security violation' AND app='my-app'"` + +1. (Optional) Add additional **Streams** as required using the **Add Stream** button. + +## What's Next + +- Refer to [Troubleshooting Forwaders]({{< relref "/controller/support/troubleshooting-forwarders.md" >}}) for tips on resolving common issues. + +{{< versions "3.16" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/metrics/_index.md b/content/controller/analytics/metrics/_index.md new file mode 100644 index 000000000..e5640c17d --- /dev/null +++ b/content/controller/analytics/metrics/_index.md @@ -0,0 +1,10 @@ +--- +description: Learn about F5 NGINX Controller Metrics. +menu: + docs: + parent: Analytics + weight: 50 +title: Metrics +weight: 150 +url: /nginx-controller/analytics/metrics/ +--- diff --git a/content/controller/analytics/metrics/metrics-api.md b/content/controller/analytics/metrics/metrics-api.md new file mode 100644 index 000000000..52b85d6ed --- /dev/null +++ b/content/controller/analytics/metrics/metrics-api.md @@ -0,0 +1,481 @@ +--- +description: Tips and tricks for using the Metrics API query parameters to refine + your data. +docs: DOCS-535 +doctypes: +- tutorial +tags: +- docs +title: Using the Metrics API +toc: true +weight: 50 +--- + +## Overview + +You can use the F5 NGINX Controller Analytics module to monitor your NGINX instances and evaluate your applications' performance. The [Metrics API]({{< relref "/controller/api/_index.md" >}}) query parameters let you fine-tune your system data based on parameters such as time window, aggregation, time resolution, and filter. 
+ +By using different combinations of these query parameters, you can gather information that lets you: + +- Identify which of your Apps receives the most traffic -- query for the highest number of requests among all apps. +- Understand the behavior of your back-end server(s) -- query for upstream latency by instance or location. +- Monitor your application performance -- filter on HTTP response codes to track the number of successful or failed requests by app and environment. +- Understand how your App behavior and/or usage changes across version releases -- compare data like the examples above across different versions of your application. + +## Usage + +You can use the NGINX Controller [Metrics API]({{< relref "/controller/api/_index.md" >}}) to query for desired metric names and fine-tune the data returned based on the following parameters: + +- time window (`startTime` and `endTime`) +- `filter` +- `resolution` +- `groupBy` +- `seriesLimit` +- `orderSeriesBy` +- `dimensions` + +{{< note >}} +Because NGINX Controller is constantly evolving, these example metrics and dimensions may differ from what you see with your NGINX Controller instance. Some metrics may require pre-configured applications to be visible in the API. +{{< /note >}} + +### Understanding the Metrics API Response + +The [Metrics API]({{< relref "/controller/api/_index.md" >}}) response consists of query metadata and an array of `metrics` -- one array element for each queried metric. + +- The **metric** object includes the queried metric name and an array of data series associated with the metric. +- The **series** object groups metrics data according to dimension values. The series consists of dimensions (key-value map), timestamps, and the timestamps' metric values. 
+ +```json +{ + "metrics":[ + { + "name":"http.request.count", + "series":[ + { + "dimensions":{ + "app":"app-name", + "component":"component-name", + "environment":"environment-name", + "gateway":"gateway-name", + "instance":"instance-name" + }, + "timestamps":[ + "2020-07-01T12:00:00Z" + ], + "values":[ + 1000 + ] + }, + { + "dimensions":{ + "app":"app-name-2", + "component":"component-name", + "environment":"environment-name", + "gateway":"gateway-name", + "instance":"instance-name" + }, + "timestamps":[ + "2020-07-01T12:00:00Z" + ], + "values":[ + 2000 + ] + } + ] + } + ], + "queryMetadata":{ + "endTime":"2020-07-01T12:00:00.970106672Z" + } +} +``` + +In the preceding example, there are two data series for the queried metric. The differentiator between the two series is the "app" name. This name is what makes NGINX metrics app-centric: you can easily distinguish metrics based on their dimensions' values, such as an App, Environment, or Gateway name. + +You can view the full list of the supported metrics and dimensions, with detailed descriptions, by querying the Catalog API: + +```curl +curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/catalogs/metrics" +``` + +Likewise, you can get a full list of the available dimensions by querying the Catalogs API: + +```curl +curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/catalogs/dimensions" +``` + +This information is also provided in the [Catalogs Reference]({{< relref "/controller/analytics/catalogs/_index.md" >}})). + +### Querying the Metrics API + +This section provides an overview of each query parameter and examples of using the parameters together to refine your data. + +The examples progress from basic usage to more advanced API queries. + +#### Names + +The `names` parameter is the only required parameter in the [Metrics API]({{< relref "/controller/api/_index.md" >}}). 
+
+The following example query returns a response with the last recorded value for the queried metric: `http.request.count`:
+
+```curl
+curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=http.request.count"
+```
+
+If the dimension values differ, the `series` array in the response will contain multiple items.
+
+It is possible to query the API for several metrics simultaneously. To do so, provide the metric names as a comma-separated list:
+
+```curl
+curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=http.request.count,http.request.bytes_rcvd"
+```
+
+#### Time Window
+
+To get more than the last recorded value for the queried metric, use the following time window parameters:
+
+- `startTime` indicates the start of the time window to include metrics from (inclusive).
+- `endTime` indicates the end of the time window to include metrics from (non-inclusive).
+
+There are a few rules to remember when working with time window parameters:
+
+- If you provide an `endTime`, you must also provide a `startTime`;
+- `endTime` must be greater than `startTime`;
+- If you give a `startTime` but don't give an `endTime`, the `endTime` defaults to the current time.
+
+You can define time using the `ISO 8601` format (for example, `2020-07-14T13:07:11Z`) or as an offset. An offset is a string that starts with `+` or `-`, followed by a number and a unit of time: `y`, `M`, `w`, `d`, `h`, `m`, or `s`. You can also use `now` to indicate the current timestamp.
+
+The following example request returns all the recorded metric values for the last three hours.
+
+```curl
+curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=http.request.count&startTime=now-3h"
+```
+
+The following example query contains a fully defined time window:
+
+```curl
+curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=http.request.count&startTime=now-5h&endTime=2020-07-01T09:00:00Z"
+```
+
+In this case, the response contains metrics from 05:00:00 to 09:00:00 on the 1st of July 2020.
+
+#### Aggregations
+
+Using only `names` and time window parameters will give you the raw data points of metrics values.
+
+To get a more organized response, you can provide an aggregate function for each queried metric: `AVG`, `SUM`, `COUNT`, `MAX`, `MIN`, or `RATE`.
+
+{{< note >}}
+In the following definitions, `time period` refers to the `resolution` (if provided) or the difference between the `endTime` and `startTime` (when `resolution` is not provided).
+{{< /note >}}
+
+- `AVG` - calculates the average value of the metric data samples over the period
+- `SUM` - calculates the total value of the metric data samples over the period
+- `COUNT` - returns the number of collected data samples of the metric over the period
+- `MIN`/`MAX` - returns the minimal/maximal data sample of the metric from the given period
+- `RATE` - returns an average value of the metric calculated per second (always *per second*, regardless of the provided `resolution`), based on the data available in the given period
+
+{{< note >}}
+You must define a `startTime` when using aggregate functions.
+{{< /note >}}
+
+{{< see-also >}}
+The list of supported aggregate functions for any particular metric is available in the [Metrics Catalog]({{< relref "/controller/analytics/catalogs/metrics.md" >}}).
+{{< /see-also >}}
+
+For example, the following query returns a single value (per dimension set), which is the sum of the metric values for the last three hours. 
To get proper values, ensure that the `endTime` is greater than the `startTime`.
+
+```curl
+curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.count)&startTime=now-3h"
+```
+
+It is possible to use aggregated and non-aggregated metrics in a single query. For this query, the [Metrics API]({{< relref "/controller/api/_index.md" >}}) returns a single value per dimension set. That value is the sum of all of the metric's values for the last three hours.
+
+For example:
+
+```curl
+curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.count),http.request.bytes_rcvd&startTime=now-3h"
+```
+
+{{< important >}}
+Using AVG aggregation with traffic metrics with the `.total` suffix may cause confusion because traffic metrics are already aggregated. To learn more, refer to the [Overview: Traffic Metrics]({{< relref "/controller/analytics/metrics/overview-traffic-metrics.md" >}}) topic.
+{{< /important >}}
+
+#### Resolution
+
+If you want to change the returned data's granularity, you can use the `resolution` parameter. This parameter must be used in conjunction with an aggregation function and a time window (at least `startTime` must be provided).
+
+The `resolution` parameter must be a valid duration. The duration is a string that starts with a number, followed by a unit of time: `y`, `M`, `w`, `d`, `h`, `m`, or `s`.
+
+The following example query returns three aggregated metric values. Here, we're asking for the data from the last three hours with one-hour granularity:
+
+```curl
+curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.count)&startTime=now-3h&resolution=1h"
+```
+
+There may be situations when the returned resolution is lower than that requested in the query. This has to do with metrics retention periods—the older the metric, the lower the resolution.
+
+If the time window contains metrics with a lower resolution than was queried for, the API downsizes the granularity to the lowest possible value. You will see a warning in the `responseMetadata`:
+
+```json
+"responseMetadata": {
+    "warning": "Time window is above 8 days, Resolution is downsized to 300 seconds"
+}
+```
+
+If no `resolution` is provided, the maximum available resolution is returned. This is calculated as `endTime` - `startTime`.
+
+#### Filter
+
+This parameter, as the name indicates, filters results based on the value of dimensions. Filtering by dimension value can help to refine the data that's returned into a more specific set.
+
+The `filter` query consists of one or more predicates in the form of `<dimension><operator><value>`, where:
+
+- `<dimension>` is the name of the dimension;
+- `<operator>` is one of the supported operators (`=`, `!=`, `<`, `<=`, `>=`, `>`, `in` or `not`);
+- `<value>` is the value of the dimension(s) that you want to filter on.
+
+For example, the following query includes a simple filter on the app name. The query returns data for the application named `app1` for the last three hours.
+
+```curl
+curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=http.request.count&filter=app='app1'&startTime=now-3h"
+```
+
+{{< tip >}}
+
+- Predicates can be combined into logical expressions using `OR`, `AND`, and `(` `)`.
+- For matching values, wildcard (`*`) use is supported.
+- We recommend wrapping predicates in single quotes to ensure that the full query string is processed correctly.
+
+{{< /tip >}}
+
+The following example request uses `filter` with logical expressions:
+
+```curl
+curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=http.request.count&filter=app='ap*' and environment='prod'&startTime=now-3h"
+```
+
+#### GroupBy
+
+Using filters and aggregation functions may not be enough to allow you to get comprehensive information about a specific application or environment.
+
+The `groupBy` parameter helps to gather results according to the specified dimension(s). You can provide multiple dimension names as a comma-separated list.
+
+{{< note >}}
+
+- When using `groupBy`, you must use an aggregate function and a time window (`startTime` must be defined; `endTime` is optional).
+- If a request contains aggregated and non-aggregated metrics, the `groupBy` parameter will apply only to the aggregated metrics.
+
+{{< /note >}}
+
+For example, the following query returns the aggregated request count for the last three hours, grouped by the `app` and `alias` dimensions.
+
+```curl
+curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.count)&groupBy=app,alias&startTime=now-3h"
+```
+
+The API response for the query looks similar to the following:
+
+```json
+{
+   "metrics":[
+      {
+         "aggr": "SUM",
+         "name":"http.request.count",
+         "series":[
+            {
+               "dimensions":{
+                  "app":"app-name",
+                  "alias": "alias1"
+               },
+               "timestamps":[
+                  "2020-07-01T12:00:00Z"
+               ],
+               "values":[
+                  1000
+               ]
+            },
+            {
+               "dimensions":{
+                  "app":"app-name-2",
+                  "alias": "alias1"
+               },
+               "timestamps":[
+                  "2020-07-01T12:00:00Z"
+               ],
+               "values":[
+                  2000
+               ]
+            }
+         ]
+      }
+   ],
+   "queryMetadata":{
+      "endTime":"2020-07-01T12:00:00.970106672Z"
+   }
+}
+```
+
+The API returns the data for the last three hours grouped by `app` and `alias` dimensions. Unlike other queries, the API only returns those dimensions that have been selected in `groupBy`. However, the series of different dimension values are still distinguished.
+
+#### SeriesLimit and OrderSeriesBy
+
+There are cases when you might want to view only a specific data series (for example, "Top-5"). To query the API for a particular series of data, you can define the `seriesLimit` and `orderSeriesBy` query parameters.
+
+- `seriesLimit` sets an upper limit on the number of series returned.
+
+- `orderSeriesBy` sorts the series values according to the order specified:
+
+  - Must consist of two tokens -- an aggregate function and a sort order. For example, `SUM DESC`, `MIN ASC`, and so on.
+  - Can be used only in combination with `seriesLimit`.
+
+When you specify a `seriesLimit` and the total number of series is greater than that limit, the response includes an additional series named `other`. This series aggregates the metric values of all the series that are not included in the result -- that is, the series outside of the specified limit.
+
+{{< note >}}
+When using `seriesLimit`, you can only specify one metric name in the `names` parameter and one `groupBy` parameter.
+{{< /note >}}
+
+**Example 1**
+The following example request uses `seriesLimit` to restrict the data returned to five series:
+
+```curl
+curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.count)&groupBy=app&seriesLimit=5&startTime=now-3h&resolution=5m"
+```
+
+The response contains data for the last three hours, grouped by the `app` dimension. Unlike the other example queries, in this example, the API returns just those dimensions that have been selected in `groupBy`. Each dimension and its corresponding values are provided as distinct items in a series.
+
+**Example 2**
+The following example query uses both `seriesLimit` and `orderSeriesBy`:
+
+```curl
+curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(some.metric.name)&groupBy=someDimension&seriesLimit=5&orderSeriesBy=MAX DESC&startTime=now-1d&endTime=now&resolution=5m"
+```
+
+**Example 3**
+Building on the previous examples, here we use `seriesLimit` and `orderSeriesBy` to get the top-5 URIs with the highest number of bytes received for a specific App and Environment:
+
+```curl
+curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.bytes_rcvd)&startTime=now-1h&filter=app='app1' AND environment='qa'&groupBy=http.uri&seriesLimit=5&orderSeriesBy=MAX DESC"
+```
+
+In this case, the API returns five data series for the last hour ordered by MAX value in descending order for bytes received per URL, where the data is related to the application `app1` deployed on the environment `qa`.
+
+Together, these parameters are particularly useful for refining data. The `seriesLimit` parameter says how many series should be returned, and the `orderSeriesBy` parameter defines the criteria for ordering series.
+
+#### Dimensions
+
+You can use the `dimensions` query parameter to specify which dimension(s) should be included in each metric series' response.
+
+Dimensions not specified in the query parameter will not be included in the response. This may result in some series having the same dimension set but being returned as separate list items.
+
+The following example returns results for the specified metric, where `dimensions=environment`:
+
+```curl
+curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.count)&dimensions=environment&startTime=now-3h"
+```
+
+If you have multiple Apps, the response looks similar to the following example:
+
+```json
+{
+   "metrics":[
+      {
+         "aggr": "SUM",
+         "name":"http.request.count",
+         "series":[
+            {
+               "dimensions":{
+                  "environment":"prod"
+               },
+               "timestamps":[
+                  "2020-07-01T12:00:00Z"
+               ],
+               "values":[
+                  1000
+               ]
+            },
+            {
+               "dimensions":{
+                  "environment":"prod"
+               },
+               "timestamps":[
+                  "2020-07-01T12:00:00Z"
+               ],
+               "values":[
+                  2000
+               ]
+            }
+         ]
+      }
+   ],
+   "queryMetadata":{
+      "endTime":"2020-07-01T12:00:00.970106672Z"
+   }
+}
+```
+
+If `dimensions` and `groupBy` parameters are both used, the list of provided `dimensions` must be a subset of the list provided in `groupBy`.
+
+The following example uses `dimensions` with `groupBy`:
+
+```curl
+curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.count)&groupBy=app,location&dimensions=app&startTime=now-3h&resolution=5m"
+```
+
+The `dimensions` parameter also lets you omit the dimensions from the response altogether. To do so, define `dimensions` as an empty list (`dimensions=`).
+
+This results in several data series for the `http.request.count` metric without any dimensions being visible. That is not useful on its own; however, if you combine the empty `dimensions` parameter with metric aggregation, you will receive a single series with aggregated values.
+
+For example, the following example query sums all the values in all of the series of the `http.request.count` metric for the past three hours using the default `resolution`.
+
+```curl
+curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.count)&startTime=now-3h&dimensions="
+```
+
+The response looks similar to the following example:
+
+```json
+{
+   "metrics":[
+      {
+         "aggr": "SUM",
+         "name":"http.request.count",
+         "series":[
+            {
+               "dimensions":{},
+               "timestamps":[
+                  "2020-07-01T12:00:00Z",
+                  "2020-07-01T12:00:30Z",
+                  "2020-07-01T12:01:00Z",
+                  "2020-07-01T12:01:30Z",
+                  ...
+               ],
+               "values":[
+                  3000,
+                  2500,
+                  2800,
+                  1900,
+                  ...
+               ]
+            }
+         ]
+      }
+   ],
+   "queryMetadata":{
+      "endTime":"2020-07-01T15:00:00Z"
+   }
+}
+```
+
+{{< important >}}
+You cannot use `dimensions` with the `seriesLimit` parameter.
+{{< /important >}}
+
+## What's Next
+
+- [Metrics Reference]({{< relref "/controller/analytics/catalogs/metrics.md" >}})
+- [Dimensions Reference]({{< relref "/controller/analytics/catalogs/dimensions.md" >}})
+- [Create Custom Dashboards]({{< relref "/controller/analytics/dashboards/custom-dashboards.md" >}})
+
+{{< versions "3.0" "latest" "ctrlvers" >}}
+{{< versions "3.18" "latest" "apimvers" >}}
+{{< versions "3.20" "latest" "adcvers" >}}
diff --git a/content/controller/analytics/metrics/overview-metrics-metadata.md b/content/controller/analytics/metrics/overview-metrics-metadata.md
new file mode 100644
index 000000000..ff67020c8
--- /dev/null
+++ b/content/controller/analytics/metrics/overview-metrics-metadata.md
@@ -0,0 +1,81 @@
+---
+description: Understanding how the F5 NGINX Controller Agent collects and reports metrics
+  and metadata.
+docs: DOCS-536
+doctypes:
+- reference
+tags:
+- docs
+title: 'Overview: Metrics and Metadata'
+toc: true
+weight: 20
+---
+
+## Overview
+
+The data that F5 NGINX Controller collects can be divided into two categories:
+
+- **System metrics**: Data collected from the NGINX Plus API, the NGINX log files, and NGINX process state.
+- **Traffic metrics**: Data related to processed traffic, with the ability to distinguish the Application, API endpoint, or Environment that traffic is directed through. + +{{< note >}} +The key difference between system and traffic metrics is that traffic metrics are pre-aggregated for each time period. +{{< /note >}} + +Metrics are published at a regular interval of 60 or 30 seconds for system and traffic metrics, respectively. + +This topic gives an overview of the traffic metrics. Also known as "app-centric" metrics, traffic metrics contain information that lets you easily identify the App to which the data applies. + +{{< see-also >}} +Refer to [View traffic metrics]({{< relref "/controller/analytics/metrics/view-traffic-metrics.md" >}}) for instructions on how to view traffic metrics using the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}). +{{< /see-also >}} +## Metadata and Metrics That Are Reported + +The NGINX Controller Agent collects the following types of data: + +- **NGINX metrics.** The Agent collects NGINX-related metrics using the NGINX Plus API, and by monitoring the NGINX log files and NGINX process state. +- **AVRD metrics.** AVRD sends app-centric data, so each metric has assigned dimensions like "application name" or "gateway". These metrics are related to processed traffic (for example, the number of bytes sent to a particular URL/endpoint). +- **NGINX configuration.** After the initial installation, the NGINX configuration is uploaded to the NGINX Controller server. Configuration updates are also uploaded to the NGINX Controller server. +- **System metrics.** These are key metrics describing the system. For example: CPU usage, memory usage, network traffic, etc. +- **NGINX metadata.** These describe your NGINX instances, and include package data, build information, the path to the binary, build configuration options, and so on. NGINX metadata also includes the NGINX configuration elements. 
+
+- **System metadata.** This is basic information about the OS environment where the Agent runs. For example, the hostname, uptime, OS flavor, and other data.
+
+For the full list of metrics, see the [Metrics Catalog Reference]({{< relref "/controller/analytics/catalogs/metrics.md" >}}).
+
+## Metrics Collection and Reporting Process
+
+The Agent mostly uses Golang's [gopsutil](https://github.com/shirou/gopsutil) to collect OS metrics.
+
+While the Agent is running on the host, it collects metrics at regular 20-second intervals. Metrics are then downsampled and sent to the Controller server once per minute. The Agent reports metadata to the NGINX Controller server every minute. Changes to the metadata can be examined using the Controller user interface.
+
+NGINX Controller stores historical metrics data in an analytics database. Metrics are aggregated and rolled-up as follows:
+
+- Data not older than 8 days are stored with the best possible resolution (usually 1 min).
+- Data older than 8 days but not older than 30 days are stored with 5 min resolution.
+- Data older than 30 days but not older than 15 months are stored with 1 hour resolution.
+- Data older than 15 months are stored with 1 day resolution.
+
+### Parsing and Analyzing NGINX Configuration Files
+
+NGINX configuration updates are reported only when a configuration change is detected.
+
+The Agent checks the Controller server every 30 seconds for pending NGINX configuration changes. When changes are pending, the changes are applied and NGINX is reloaded. Because the configuration is managed in the Controller server, the entire configuration is written to a single `nginx.conf` file.
+
+If the Agent cannot reach the Controller server to send the accumulated metrics, it continues to collect metrics and sends them to the Controller server as soon as connectivity is re-established. The maximum amount of data that can be buffered by the Agent is about 2 hours' worth of data.
+ +The Agent is able to automatically find all relevant NGINX configuration files, parse them, extract their logical structure, and send the associated JSON data to the Controller Server for further analysis and reporting. + +To parse SSL certificate metadata, the NGINX Controller Agent uses standard `openssl`(1) functions. SSL certificates are parsed and analyzed only when the corresponding [Agent settings]({{< relref "/controller/admin-guides/config-agent/configure-the-agent.md#default-agent-settings" >}}) are turned on. SSL certificate analysis is `off` by default. + +## Troubleshooting + +Most metrics are collected by the Agent without requiring the user to perform any additional setup. For troubleshooting instructions, see [Troubleshooting NGINX Controller Metrics]({{< relref "/controller/support/troubleshooting-controller.md" >}}). + +## What's Next + +- [Set up Metrics Collection]({{< relref "/controller/admin-guides/config-agent/configure-metrics-collection.md" >}}) +- [Metrics Reference]({{< relref "/controller/analytics/catalogs/metrics.md" >}}) + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/metrics/overview-traffic-metrics.md b/content/controller/analytics/metrics/overview-traffic-metrics.md new file mode 100644 index 000000000..de4b7271a --- /dev/null +++ b/content/controller/analytics/metrics/overview-traffic-metrics.md @@ -0,0 +1,80 @@ +--- +description: Understanding how traffic metrics are collected, aggregated, and reported. +docs: DOCS-537 +doctypes: +- concept +- reference +tags: +- docs +title: 'Overview: Traffic Metrics' +toc: true +weight: 100 +--- + +## Overview + +The data that F5 NGINX Controller collects can be divided into two categories: + +- **System metrics**: Data collected from the NGINX Plus API, the NGINX log files, and NGINX process state. 
+- **Traffic metrics**: Data related to processed traffic, with the ability to distinguish the Application, API endpoint, or Environment that traffic is directed through. + +{{< note >}} +The key difference between system and traffic metrics is that traffic metrics are pre-aggregated for each time period. +{{< /note >}} + +Metrics are published at a regular interval of 60 or 30 seconds for system and traffic metrics, respectively. + +This topic gives an overview of the traffic metrics. Also known as "app-centric" metrics, traffic metrics contain information that lets you easily identify the App to which the data applies. + +{{< see-also >}} +Refer to [View traffic metrics]({{< relref "/controller/analytics/metrics/view-traffic-metrics.md" >}}) for instructions on how to view traffic metrics using the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}). +{{< /see-also >}} + +## Available traffic metrics + +- `client.latency.{total | max | min | count}` +- `client.network.latency.{total | max | min | count}` +- `client.request.latency.{total | max | min | count}` +- `client.ttfb.latency.{total | max | min | count}` +- `client.response.latency.{total | max | min | count}` +- `upstream.network.latency.{total | max | min | count}` +- `upstream.header.latency.{total | max | min | count}` +- `upstream.response.latency.{total | max | min | count}` +- `http.request.bytes_rcvd` +- `http.request.bytes_sent` +- `http.request.count` + +{{< see-also >}} +Refer to the [NGINX Controller Metrics Catalog]({{< relref "/controller/analytics/catalogs/metrics.md" >}}) for details about these and the other metrics that NGINX Controller reports. +{{< /see-also>}} + +## Calculating traffic metrics + +As traffic flows through a configured application, NGINX Controller collects the traffic-related data. With heavy traffic, the number of single, distinguishable metrics can be challenging to discern. For this reason, the metric values are aggregated. 
+ +The aggregation happens every publish period -- this period is stored in the `aggregation_duration` dimension, and is usually 30 seconds -- and is based on metric dimensions. + +Metrics are aggregated using four aggregation functions: + +- **SUM** for `http.request.bytes_rcvd`, `http.request.bytes_sent` and all metrics with `.total` suffix. +- **MAX** for metrics with `.max` suffix. +- **MIN** for metrics with `.min` suffix. +- **COUNT** for metrics with `.count` suffix. + +### Example + +To better understand how metrics are aggregated, consider the following example: + +Imagine you have one application configured with one URI (recorded in the `http.uri` dimension of each traffic-related metric). In the last 30 seconds, a user queried that URI five times. The `client.request.latency` values for the requests were: 1 ms, 2 ms, 3 ms, 4 ms, and 5 ms. + +The final metric values returned by the Metrics API will be: + +- `http.request.count` = 5 +- `client.request.latency.total` = 15 ms +- `client.request.latency.max` = 5 ms +- `client.request.latency.min` = 1 ms +- `client.request.latency.count` = 5 + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/metrics/view-traffic-metrics.md b/content/controller/analytics/metrics/view-traffic-metrics.md new file mode 100644 index 000000000..4afd3f773 --- /dev/null +++ b/content/controller/analytics/metrics/view-traffic-metrics.md @@ -0,0 +1,106 @@ +--- +description: How to view the traffic metrics gathered by NGINX Controller Analytics. +docs: DOCS-538 +doctypes: +- task +- tutorial +tags: +- docs +title: View Traffic Metrics +toc: true +weight: 150 +--- + +## Overview + +This topic explains how to use the [NGINX Controller REST API]({{< relref "/controller/api/_index.md" >}}) + to view traffic metrics. 
+ +{{< see-also >}} +Refer to [Overview: Traffic Metrics]({{< relref "/controller/analytics/metrics/overview-traffic-metrics.md" >}}) to learn how NGINX Controller collects, aggregates, and reports traffic metrics. +{{< /see-also >}} + +## Before You Begin + +To view traffic metrics, first confirm that you've correctly configured NGINX Controller. + +The following resources should have the status `Configured`: + +- [Environment]({{< relref "/controller/services/manage-environments.md" >}}) +- [Gateway]({{< relref "/controller/services/manage-gateways.md" >}}) +- [App and Component]({{< relref "/controller/app-delivery/manage-apps.md" >}}) + +Initially, the graphs will display `No data yet`, and querying the Metrics API for traffic metrics will result in an empty response. As soon as the Component starts to receive traffic, the traffic-related data will be displayed in the graphs and the [Dashboards]({{< relref "/controller/analytics/dashboards/overview-dashboard.md" >}}) in the NGINX Controller user interface and will be returned in API responses. + +{{< note >}} +If traffic stops flowing to a resource (for example, an Application or Component), then no traffic metrics will be available for the resource. 
+{{< /note >}} + +## View Traffic Metrics Using the REST API + +- To view the full list of metrics and dimensions, send a GET request to the `/analytics/catalogs/metrics` endpoint: + + ```curl + curl -X GET --cookie "session=" --url "{Controller-FQDN}/api/v1/analytics/catalogs/metrics" + ``` + +- To view a detailed description for a metric, send a GET request to the `/analytics/catalogs/metrics/{metricName}` endpoint: + + ```curl + curl -X GET --cookie "session=" --url "{Controller-FQDN}/api/v1/analytics/catalogs/metrics/client.latency.total" + ``` + +- Likewise, to view the full list of available dimensions, send a GET request to the `/analytics/catalogs/dimensions` endpoint: + + ```curl + curl -X GET --cookie "session=" --url "{Controller-FQDN}/api/v1/analytics/catalogs/dimensions" + ``` + +{{< see-also >}} +Refer to the [Catalogs Reference]({{< relref "/controller/analytics/catalogs/_index.md" >}}) for information about all of the dimensions and metrics collected by NGINX Controller. +{{< /see-also >}} + +## Example REST API Queries for Traffic Metrics + +Because traffic metrics are already aggregated, you should be careful about using the Metrics API for aggregations. + +### Example 1 + +Goal: Retrieve the total number of requests for the last 3 hours: + +```curl +curl -X GET --cookie "session=" --url "{Controller-FQDN}/api/v1/analytics/metrics?names=SUM(http.request.count)&startTime=now-3h" +``` + +The Metrics API returns a single value per dimension set. That value is the sum of the aggregated values (in 30s intervals) for the last 3 hours. 
+ +### Example 2 + +Goal: Retrieve an average value of max client latencies for my app -- let's call it `app1` -- for the last day: + +```curl +curl -X GET --cookie "session=" --url "{Controller-FQDN}/api/v1/analytics/metrics?names=AVG(client.latency.max)&startTime=now-24h&filter=app='app1'" +``` + +### Example 3 + +{{< important >}} +Because traffic metrics are pre-aggregated, using AVG aggregation with these metrics isn't recommended. +{{< /important >}} + +Imagine you have one application configured with one URI (recorded in the `http.uri` dimension of each traffic-related metric). In the last 30 seconds, a user queried that URI 5 times. The `client.request.latency` values for each request were: 1 ms, 2 ms, 3 ms, 4 ms, 5 ms. + +The final metric values returned by the Metrics API will be: + +- `client.request.latency.total` = 15 ms +- `client.request.latency.count` = 5 + +The following query returns the average `client.request.latency.total = 15`, as you have one aggregated sample with value 15. + +```curl +curl -X GET --cookie "session=" --url "{Controller-FQDN}/api/v1/analytics/metrics?names=AVG(client.request.latency.total)&startTime=now-24h" +``` + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/view-app-security-analytics.md b/content/controller/analytics/view-app-security-analytics.md new file mode 100644 index 000000000..d54262e27 --- /dev/null +++ b/content/controller/analytics/view-app-security-analytics.md @@ -0,0 +1,274 @@ +--- +description: How to view App Security Analytics. +docs: DOCS-539 +doctypes: +- concept +- reference +tags: +- task +title: View App Security Analytics +toc: true +weight: 500 +--- + +## Overview + +When App Security flags or blocks a request made to an App Component as a security violation, it generates an App Security event. 
+You can use the F5 NGINX Controller web interface or the REST API to view these events or their related statistics (measures). Metrics reflect the number of requests and bytes flagged or blocked. You can use the Security Violation Dimensions to help understand and interpret the analytics data. + +For descriptions of Security Metrics and Events Dimensions, refer to [About App Security]({{< relref "/controller/app-delivery/security/concepts/what-is-waf.md" >}}) page. + +## View App Security Analytics + +You can use the NGINX Controller user interface or the REST API to view App Security Analytics. You can use this data to get a quick, high-level understanding of how the App Security module processes requests to an App. + +1. Open the NGINX Controller user interface and log in. +2. On the Navigation Bar, select **Services**. +3. On the Services Menu, select **Apps**. +4. On the Apps Overview page, select the App name link. +5. Select **Security Analytics** under the Analytics sub-menu. + +## View Security Analytics for Components + +To view Security Analytics for individual Components, take the steps below. + +1. Open the NGINX Controller user interface and log in. +2. On the Navigation Bar, select **Services**. +3. On the Services Menu, select **Apps**. +4. On the Apps Overview page, select the App name link. +5. Select **Components** from the menu. Select the Component name link. +6. Select **Security Analytics** under the Analytics sub-menu. + +### View App Security Events + +To view app security events: + +1. Open the NGINX Controller user interface and log in. +2. On the Navigation Bar, select **Services**. +3. On the Services Menu, select **Apps**. +4. On the Apps Overview page, select the App name link. +5. Select **Security Events** under the Analytics sub-menu. + +### View Security Events for Components + +To view the security events for components, take the following steps: + +1. Open the NGINX Controller user interface and log in. +2. 
On the Navigation Bar, select **Services**. +3. On the Services Menu, select **Apps**. +4. On the Apps Overview page, select the App name link. +5. Select **Components** from the sub-menu. Select the Component name link. +6. Select **Security Events** under the Analytics sub-menu. + +## Example REST API Queries for App Security Metrics + +Requests which App Security has rejected or allowed: + +```curl +https://{{host}}/api/v1/analytics/metrics? + startTime=0& + endTime=now& + names=sum(http.request.count)& + groupBy=request_outcome& + resolution=30m +``` + +Possible request outcomes are: + +- Passed: WAF allowed the request +- Rejected: WAF blocked the request + +To get request counts based on how App Security processed the traffic: + +```curl +https://{{host}}/api/v1/analytics/metrics? + startTime=0& + endTime=now& + resolution=5m& + names=sum(http.request.count)& + groupBy=request_outcome_reason& + filter=( + app='shopping' and + environment='prod' and + component='app-component') +``` + +| **request_outcome_reason values** | **Description** | +|--------------------------------|-----------------| +| \ | App Security did not process the traffic (in other words, App Security is not enabled). All events with this request_outcome_reason value should have a request_outcome `PASSED`.| +| SECURITY_WAF_OK | App Security processed the traffic and no violations are found. All events with this request_outcome_reason value should have a request_outcome of `PASSED`.| +| SECURITY_WAF_FLAGGED | App Security allowed the request, but it was flagged for review. All events with this request_outcome_reason value should have a request_outcome of `PASSED`.| +| SECURITY_WAF_VIOLATION | App Security identified one or more violations and rejected the request. All events with this request_outcome_reason value should have a request_outcome of `REJECTED`.| + +If you feel App Security is blocking too many requests, you can turn on monitor-only mode. 
+ +### Security Violation Events + +You can use Security Violation Events to investigate violations identified by App Security for requests made to an App Component. Follow the steps below to view the Security Events: + +1. Open the NGINX Controller user interface and log in. +2. Select the NGINX Controller menu icon, then select **Analytics**. +3. On the **Analytics Menu**, select **Component**. + +You can use the following example Events requests to collect App Security Analytics data by using the NGINX Controller REST API: + +- To view ‘security violation’ Events: + + ```curl + GET https://{{host}}/api/v1/analytics/events? + startTime=0& + endTime=now& + filter=( + category='security violation') + ``` + +- To get security violation details based on the Support ID seen on the request blocking page: + + ```curl + GET https://{{host}}/api/v1/analytics/events? + startTime=0& + endTime=now& + filter=( + category='security violation' and + waf.support_id='1880765231147185611') + ``` + +- To get all events where WAF rejected to investigate: + + ```curl + GET https://{{host}}/api/v1/analytics/events? + startTime=0& + endTime=now& + filter=( + category='security violation' and + request_outcome='REJECTED') + ``` + +- To get all events where WAF flagged to investigate: + + ```curl + GET https://{{host}}/api/v1/analytics/events? + startTime=0& + endTime=now& + filter=( + category='security violation' and + request_outcome_reason='SECURITY_WAF_FLAGGED') + ``` + +- To get all events where WAF has rejected or flagged to review: + + ```curl + GET https://{{host}}/api/v1/analytics/events? + startTime=0& + endTime=now& + filter=( + category='security violation' and + request_outcome_reason in ('SECURITY_WAF_VIOLATION','SECURITY_WAF_FLAGGED')) + ``` + +- To get all events where WAF has rejected or flagged for a specific App Component: + + ```curl + GET https://{{host}}/api/v1/analytics/events? 
+    startTime=0&
+    endTime=now&
+    filter=(
+      category='security violation' and
+      request_outcome_reason in ('SECURITY_WAF_VIOLATION','SECURITY_WAF_FLAGGED') and
+      app='shopping' and
+      environment='prod' and
+      component='app-component')
+  ```
+
+  {{< tip >}}
+To get all Events, remove the Environment, App, and Component filters from the request call.
+  {{< /tip >}}
+
+- To find requests flagged by App Security’s violation rating algorithm as a possible or likely threat:
+
+  ```curl
+  GET https://{{host}}/api/v1/analytics/events?
+    startTime=0&
+    endTime=now&
+    filter=(
+      category='security violation' and
+      request_outcome_reason = 'SECURITY_WAF_FLAGGED' and
+      waf.violation_rating in ('POSSIBLE_ATTACK','MOST_LIKELY_ATTACK') and
+      app='shopping' and
+      environment='prod' and
+      component='app-component')
+  ```
+
+  {{< important >}}
+This is important if you are using App Security WAF monitoring only mode. You can use it to understand the type of threats WAF believes should be blocked.
+  {{< /important >}}
+
+- To get Events that have triggered a specific signature-based violation by signature id:
+
+  ```curl
+  GET https://{{host}}/api/v1/analytics/events?
+    startTime=0&
+    endTime=now&
+    filter=(
+      category='security violation' and
+      waf.signature_ids ='*200000098*' and
+      app='shopping' and
+      environment='prod' and
+      component='app-component')
+  ```
+
+  The substring search using wildcards or ‘IN’ operand should be used because each signature might be part of various combinations of signatures triggered by App Security per request.
+
+- To get Events that have triggered a specific signature-based violation by signature name:
+
+  ```curl
+  GET https://{{host}}/api/v1/analytics/events? 
+ startTime=0& + endTime=now& + filter=( + category='security violation' and + waf.signature_names IN ('DIRECTORY_TRAVERSAL') and + app='shopping' and + environment='prod' and + component='app-component') + ``` + + The substring search using wildcards or ‘IN’ operand should be used because each signature might be part of various combinations of signatures triggered by App Security per request. + +- To get Events that triggered a particular attack type: + + ```curl + GET https://{{host}}/api/v1/analytics/events? + startTime=0& + endTime=now& + filter=( + category='security violation' and + waf.attack_types='*Non-browser Client, Abuse of Functionality*' and + app='shopping' and + environment='prod' and + component='app-component') + ``` + + The substring search using wildcards or ‘IN’ operand should be used because each signature might be part of various combinations of attack types triggered by App Security per request. + +- To get Events from a remote address (client IP) + + ```curl + GET https://{{host}}/api/v1/analytics/events? + startTime=0& + endTime=now& + filter=( + category='security violation' and + http.remote_addr='172.18.71.147' and + app='shopping' and + environment='prod' and + component='app-component') + ``` + +## Related Pages + +- [About App Security]({{< relref "/controller/app-delivery/security/concepts/what-is-waf.md" >}}) + +{{< versions "3.11" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/api-management/_index.md b/content/controller/api-management/_index.md new file mode 100644 index 000000000..f8f0f8394 --- /dev/null +++ b/content/controller/api-management/_index.md @@ -0,0 +1,9 @@ +--- +description: Tasks for deploying and managing your APIs. 
+menu: + docs: + parent: NGINX Controller +title: API Management +weight: 155 +url: /nginx-controller/api-management/ +--- diff --git a/content/controller/api-management/manage-apis.md b/content/controller/api-management/manage-apis.md new file mode 100644 index 000000000..696158c9e --- /dev/null +++ b/content/controller/api-management/manage-apis.md @@ -0,0 +1,507 @@ +--- +description: Use the F5 NGINX Controller API Manager to add APIs and control how your + APIs are exposed and consumed. +docs: DOCS-569 +doctypes: +- tutorial +tags: +- docs +title: Manage Your APIs +toc: true +weight: 110 +--- + +## Overview + +The F5 NGINX Controller API Management module provides full life cycle management for your APIs. This document provides a walkthrough of the steps needed to create, version, and publish your API using the NGINX Controller API Management module. When you have completed this guide, you should have the following resources: + +- An **API Definition**, which stores a collection of related API resources. It can be thought of as a folder. +- An **API Version**, which describes a particular API and serves as the data contract. It describes available endpoints and operations on each endpoint and can also include API documentation. +- A **Published API**, which represents an API Version that has been deployed to an NGINX Plus instance serving as an API Gateway. +- (Optional) API documentation available via the Developer Portal. + +{{< note >}} + +- You must have an API Management module license installed to complete the steps in this guide. +- The API Management module is available to users with the predefined [Admin or User Roles]({{< relref "/controller/platform/access-management/manage-roles.md#predefined-roles-and-role-groups" >}}). + +{{< /note >}} + +## Create an API Definition + +An API Definition functions as a collection of related API resources. + +1. Open the NGINX Controller user interface and log in. + +2. 
Select the NGINX Controller menu icon, then select **Services**. + +3. On the **Services** menu, select **APIs**. + +4. On the **All APIs** page, select **Create** and choose **API Definition**. Alternatively, you can also select **Create API Definition** from the Quick Actions list. + +## Create an API Version + +An API Version describes a particular API. It can be thought of as an API specification. + +1. Open the NGINX Controller user interface and log in. + +2. Select the NGINX Controller menu icon, then select **Services**. + +3. On the **Services** menu, select **APIs**. + +4. On the **All APIs** page, select **Create** and choose **API Version**. Alternatively, you can also select **Create API Version** from the Quick Actions list. + +5. Select an existing **API Definition** under which to group the API Version or select **Create New** to add a new **API Definition**. + +6. Choose how you would like to describe the API: + + 1. [OpenAPI specification](#import-an-openapi-specification) + + 2. [Configure manually](#define-api-resources-manually) + + 3. [WSDL file](#import-a-web-services-description-language-wsdl-file) (Currently only supports unauthenticated, unencrypted traffic) + +7. Provide a version. If a version isn't provided, the default version `unspecified` is used. + +8. (Optional) Provide a display name. + +9. (Optional) Provide a description. + + {{< note >}} + + If your API specification includes a description, that text populates this field automatically when you [add your OpenAPI spec](#import-an-openapi-specification). + + {{< /note >}} + +10. (Optional) Add tags. + +### Import an OpenAPI Specification + +The APIM module supports import of a valid OpenAPI v3 specification formatted as valid JSON or YAML. + +{{< note >}} + +If your API spec includes documentation elements, the "Enable documentation" option is selected automatically. You do not need to take any additional steps to document your API. 
+
+{{< /note >}}
+
+**To import your spec by uploading a file:**
+
+1. Select **OpenAPI Specification**.
+
+2. Select **Import file**.
+
+3. Drag and drop your file into the upload box, or select **Browse** to find and upload a file.
+
+**To import your spec by copying and pasting:**
+
+1. Select **OpenAPI Specification**.
+
+2. Select **Copy and paste specification text**.
+
+3. Paste or type your API in the space provided.
+
+Once you have imported your API spec, select **Next** to continue to the **Resources** page.
+
+### Define API Resources Manually
+
+Take the steps below to manually add your API resources.
+
+1. Select **Configure Manually**.
+
+2. Select **Next** to continue to the **Resources** page.
+
+3. Select **Add API Resource**.
+
+4. Select the **Match Type** to use for the API resource path.
+
+5. Specify the **Path** for the API resource.
+**Tip**: Path should start with `/`, for example, `/userlookup/{userid}/attributes/{surname}`.
+
+6. Select the HTTP method(s).
+
+7. (Optional) [Document Your API](#document-your-api).
+
+8. Review the API spec that will be submitted to create the **API Version**.
+
+9. Select **Submit** to save the **API Version**.
+
+### Document Your API
+
+Follow the steps below to document your API.
+
+{{< note >}}
+
+API documentation must follow the OpenAPI 2.0/3.0 Specification.
+
+If you uploaded an API spec that contains documentation, you don't need to take any further steps to document your API.
+
+{{< /note >}}
+
+{{< tip >}}
+
+Skip to step 6 if you're continuing from the [Define API Resources Manually](#define-api-resources-manually) section.
+
+{{< /tip >}}
+
+1. Open the NGINX Controller user interface and log in.
+
+2. Select the NGINX Controller menu icon, then select **Services**.
+
+3. On the **Services** menu, select **APIs**.
+
+4. On the **All APIs** page, select the **API Version** for which you want to create documentation. Click the pencil (edit) button to edit the API Version.
+
+5. 
Select **Resources**. + +6. Select the pencil (edit) icon next to the method or methods that you want to document. + +7. Select **Enable Documentation**. + +8. Add a summary. + +9. (Optional) Add a description. + +10. (Optional) Add a request body description. + +11. (Optional) Add a sample request. + +12. Specify whether the request body is required. + +13. To add a parameter, select **Add Parameter**. + +14. Provide the parameter name. + +15. (Optional) Provide a parameter description. + +16. Select the parameter type. + +17. Select the parameter value. + +18. (Optional) Specify whether the parameter is required. + +19. To add a response, select **Add Response**. + +20. Provide the HTTP Response status code. + +21. Provide a description. + +22. (Optional) Provide a sample response in JSON format. + +23. Select **Next** to review the API spec that will be submitted to update the **API Version**. + +24. Select **Submit** to save the **API Version**. + +### Import a Web Services Description Language (WSDL) file + + {{< caution >}} + +Currently, only HTTP is supported for SOAP-REST proxy traffic. Traffic will be unauthenticated and unencrypted, and as a result will be vulnerable to several security risks. It should be treated as a beta/preview feature. + + {{< /caution >}} + +The APIM module supports importing a WSDL file that describes a SOAP service. + +**To import your spec by uploading a file:** + +1. Select **WSDL File**. + +2. Select **Import file**. + +3. Drag and drop your file into the upload box, or select **Browse** to find and upload a file. + +**To import your spec by copying and pasting:** + +1. Select **WSDL file**. + +2. Select **Copy and paste WSDL text**. + +3. Paste or type your API in the space provided. + +Once you've imported your API spec, select **Next** to continue to the **Resources** page. Note that you need to submit the API spec before you can modify the **Resources** and **Schema**. 
Select **Submit** to save the **API Version.**
+
+### Modify Schema and Resources for an API Version created from a WSDL file
+
+Take the following steps to edit your API Version:
+
+1. On the **All APIs** page, select the **API Version** that was created from a WSDL
+
+2. Select **Next** to continue to the **Resources** page.
+
+3. For each **SOAP operation**, choose the appropriate equivalent **REST Method**.
+
+4. (Optional) Modify the **Path** for the API resource as desired.
+
+   {{< tip >}}
+
+   Path should start with `/`, for example, `/userlookup/{userid}/attributes/{surname}`.
+
+   {{< /tip >}}
+
+5. Select **Next** to continue to the **Schema** page
+
+6. (Optional) For each JSON schema, modify the **Property** as desired
+
+7. Review the API spec that will be submitted to create the **API Version**.
+
+8. Select **Submit** to save the **API Version**.
+
+## Publish an API
+
+You need at least one of each of the resources listed below to complete this section. If you haven't already created the required resources, you can do so while configuring the Published API.
+
+- [Environment]({{< relref "/controller/services/manage-environments.md" >}})
+
+- [Gateway]({{< relref "/controller/services/manage-gateways.md" >}})
+
+- [App]({{< relref "/controller/app-delivery/manage-apps.md" >}})
+
+- [Identity Provider]({{< relref "/controller/services/manage-identity-providers.md" >}})
+
+  (required to add Authentication to the Published API Component).
+
+{{< tip >}}
+You can connect one or more [Developer Portals]({{< relref "/controller/api-management/manage-dev-portals.md" >}}) to your Published API to host your API documentation. This can be done either when creating or editing your Published API, or independently via the API Quick Actions menu.
+{{< /tip >}}
+
+### Add a Published API
+
+1. Open the NGINX Controller user interface and log in.
+
+2. Select the NGINX Controller menu icon, then select **Services**.
+
+3. 
On the **Services** menu, select **APIs**. + +4. On the **All APIs** page, select the **API Version** that you want to publish. + +5. Select **Add Published API**. + +#### Configure the Published API + +On the **Create Published API** *Configuration* page: + +1. Select the **API Definition Version** that you want to publish. + +2. (Optional) Provide a **Base Path** for the Published API. + +3. Specify whether the **Strip Base Path** parameter is required. + + {{< note >}} + + The `Strip Base Path` option modifies the path that is passed from the Gateway to the upstream host. When the option is selected, the base path will be removed from the original request when the request is passed to the upstream host. If the option is not selected, the original request -- including the base path -- is passed from the Gateway to the upstream host. + + {{< /note >}} + +4. Provide a Name and/or Display Name for the Published API. + +5. (Optional) Provide a description for the Published API. + +6. (Optional) Add tags. + +7. Select **Next**. + +#### Define the Published API Deployment + +For each of the steps below, you can create a new resource for the Published API by selecting the **Create New** link. + +On the **Create Published API** *Deployment* page: + +1. Select the **Environment** that the Published API belongs to. + +2. Select the **App** that the Published API represents. + +3. Select the **Gateway(s)** that will expose the Published API. + +4. Select the **Dev Portal(s)** that will host documentation for the Published API. + +5. Select **Next**. + +#### Define the Routing Rules + +On the **Create Published API** *Routing* page: + +1. Select the **Add New** link to create a new App Component resource for the Published API. The **Create App Component** page has multiple sections. + +2. On the **Create App Component** *Configuration* page: + + 1. Provide the name for your Component. + + 2. (Optional) Provide a Display Name. + + 3. (Optional) Provide a Description. + + 4. 
(Optional) Add any desired tags. + + 5. (Optional) Select the error response format. + + 6. Select **Next**. + +3. On the **Create App Component** *Workload Groups* page: + + 1. Provide a Workload Group Name. + + 2. (Optional) Select a Location. The default Location is 'Unspecified'. This value is applied automatically to "bring your own" (BYO) NGINX Plus instances that were not deployed by NGINX Controller. + + 3. Define the backend workload URIs. + + 4. (Optional) Define the DNS Server. + + 5. (Optional) Select the Load Balancing Method. The default value is `Round Robin`. + + 6. (Optional) Select the Session Persistence Type (applicable only to Web Components). + + 7. (Optional) Select the Desired Proxy Settings (applicable only to Web Components). + + 8. Select **Next**. + {{< see-also >}} + + - Refer to the [Manage Locations]({{< relref "/controller/infrastructure/locations/manage-locations.md" >}}) topic for more information. + + - Refer to the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/) for more information about the available options. + + {{< /see-also >}} + {{< tip >}} + Hover your pointer over the info icon for each setting to learn about the expected values and requirements. + {{< /tip >}} + + +4. On the **Create App Component** *Rate Limiting* page: + + 1. Enable Rate Limiting and select a **Key**. + + 2. Select options for Rate and Units. + + 3. (Optional) Select options for Excess Request Processing and Ignore Initial N Requests. + + 4. Select options for Reject Status Code. + + 5. Select **Next**. + +5. On the **Create App Component** *Authentication* page: + + 1. Select **Add Authentication**. + + 2. Select an [**Identity Provider**]({{< relref "/controller/services/manage-identity-providers.md" >}}). + + 3. Select a **Credential Location**. + + 1. (Optional) Enable [**Conditional Access**]({{< relref "/controller/services/available-policies.md#conditional-access" >}}). + + 4. Select **Next**. 
+ +{{< important >}} + +The **Advanced Security** features require an *NGINX Controller API Management Advanced Security* license. + +{{< /important >}} + +6. On the **Create App Components** *Advanced Security* page: + + 1. (Optional) Select **Enable Web Application Firewall (WAF)** to monitor and block suspicious requests or attacks. + + 2. (Optional) Select **Monitor Only** to allow traffic to pass without being rejected. Security events are still generated and metrics are still collected. Refer to [About App Security Analytics]({{< relref "/controller/analytics/view-app-security-analytics.md" >}}) for more information. + + 3. (Optional) Add the signature(s) that you want WAF to ignore. You can specify multiple signatures as a comma-separated list. + + 4. Select **Next** + + {{< see-also >}} Refer to the [Default WAF Policy]({{< relref "/controller/app-delivery/security/concepts/app-sec-default-policy-original.md" >}}) topics to learn more about the default protection provided by NGINX App Protect. {{< /see-also >}} + + +7. On the **Create App Component** *Ingress* page: + + 1. (Optional) Set the desired **Client Max Body Size**. + 2. Select **Next**. + + {{< see-also >}} + + Refer to the [NGINX module docs](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size) for more information about this option. + + {{< /see-also >}} + + +8. On the **Create App Component** *Monitoring* page: + + 1. (Optional) Enable **Health Monitoring** and define the desired Monitoring Request and Response. Health Monitoring is disabled by default. + + 2. (Optional) Specify the URI to use in health check requests (applicable only to Web Components). The default is `/`. For TCP/UDP Components, specify the Send string. + + 3. (Optional) Specify the port to use when connecting to a server to perform a health check. The server port is used by default. + + 4. (Optional) Set the interval to wait between two consecutive health checks. The default is 5 seconds. + + 5. 
(Optional) Specify the number of consecutive passed health checks that must occur for a server to be considered healthy. The default is `1`.
+
+   6. (Optional) Specify the number of consecutive failed health checks that must occur for a server to be considered unhealthy. The default is `1`.
+
+   7. (Optional) Specify the default state for the server. The default state is `HEALTHY`.
+
+   8. (Optional) Specify the starting HTTP status code to match against (applicable only to Web components).
+
+   9. (Optional) Specify the ending HTTP status code to match against (applicable only to Web components).
+
+   10. (Optional) Select whether a response should pass in order for the health check to pass (applicable only to Web components). By default, the response should have status code `2xx` or `3xx`.
+
+   11. Select **Next**.
+
+   {{< see-also >}}
+
+   Refer to the [NGINX module docs](http://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check) for more information about these options.
+
+   {{< /see-also >}}
+
+9. On the **Create App Component** *Logs* page:
+
+   1. (Optional) Select the logs to enable:
+
+      - Error Log
+
+      - Access Log
+
+   2. (Optional) Specify the log format to use.
+
+   3. Select **Next**.
+
+   {{< see-also >}}
+
+   Refer to the [NGINX docs](http://nginx.org/en/docs/http/ngx_http_log_module.html) for more information about these options.
+
+   {{< /see-also >}}
+
+10. On the **Create App Component** *Programmability* page:
+
+   The following settings are applicable **only to Web components**.
+
+   1. (Optional) Select **Add URI Redirects** and define the desired redirect condition(s).
+
+   2. (Optional) Select **Add URI Rewrite** and define the desired rewrite pattern(s).
+
+   3. (Optional) Select **Add Request Header Modification** and define how to modify the request header.
+
+   4. (Optional) Select **Add Response Header Modification** and define how to modify the response header.
+
+   5. Select **Next**. 
+ + {{< see-also >}} + + Refer to the [NGINX module docs](http://nginx.org/en/docs/http/ngx_http_rewrite_module.html) for more information about these options. + + {{< /see-also >}} + + 6. Select **Next** to review the API spec that will be sent to create the App Component. + + 7. Drag and drop resources one at a time, or move multiple resources by selecting the checkboxes next to the desired resources, from the **Unrouted** column to the desired Component in the **Components** list. You can use the search bar to narrow down the list. + **Note:** Resources can be dragged between **Components** and back to the **Unrouted** section either one at a time or by multi-select. + + 8. Select **Next** to review the API spec that will be sent to create the Published API. + + 9. Select **Submit** to create the Published API. + +## Create a Developer Portal + +Once you have created an API Definition and a Published API, you can host your API in a Developer Portal. + +From the **API Definitions** page, select **Create Dev Portal** from the Quick Actions menu. Then, follow the steps in [Create a Developer Portal]({{< relref "/controller/api-management/manage-dev-portals.md" >}}) to create, customize, and publish your Dev Portal. + +{{< versions "3.0" "3.18" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} diff --git a/content/controller/api-management/manage-dev-portals.md b/content/controller/api-management/manage-dev-portals.md new file mode 100644 index 000000000..02923d76e --- /dev/null +++ b/content/controller/api-management/manage-dev-portals.md @@ -0,0 +1,140 @@ +--- +description: Learn how to create and manage Developer Portals for your API documentation. 
+docs: DOCS-570 +doctypes: +- tutorial +tags: +- docs +title: Manage Developer Portals +toc: true +weight: 120 +--- + +## Overview + +You can use F5 NGINX Controller Developer Portals (also called 'Dev Portals') to create and manage beautiful, easy-to-use API reference documentation to support your [Published APIs]({{< relref "/controller/api-management/manage-apis.md#publish-an-api" >}}). + +## About Developer Portals + +In NGINX Controller, each Dev Portal sits within an Environment. An Environment can contain multiple Dev Portals. You can use the same Dev Portal names across different Environments, which means you can create "test", "dev", and "production" versions of your Dev Portal across the corresponding Environments. + +Each Dev Portal is associated with a Gateway, which defines the URI at which users can access the Dev Portal -- for example, `developer.acme.com`. A Gateway for a Developer Portal can be placed on a dedicated Instance, or share an Instance with other Gateway resources. + +## Before You Begin + +You must complete the steps below before you can create a Developer Portal. + +1. [Create an Environment]({{< relref "/controller/services/manage-environments.md" >}}). +1. [Create a Gateway]({{< relref "/controller/services/manage-gateways.md" >}}) for the Dev Portal. + + {{< tip >}} +You can create multiple Dev Portal Gateways on the same Instance. If you do so, be sure to use a unique hostname and port for each. For example: + +- Gateway 1's ingress URI is `https://dev-developer.acme.com`. +- Gateway 2's ingress URI is `https://test-developer.acme.com`. These resources might both have IP addresses and ports that are accessible only from within your private network. +- Gateway 3's ingress URI is `https://developer.acme.com`. This resource would have a public IP address and be accessible via the internet. + +If you create multiple Dev Portal Gateways on the same Instance using the same hostname and port, the Dev Portal configuration will fail. 
{{< /tip >}}
+
+1. [Create an API Definition]({{< relref "/controller/api-management/manage-apis.md#create-an-api-definition" >}}).
+
+   {{< tip >}}
+If you choose to [define your API manually]({{< relref "/controller/api-management/manage-apis.md#define-api-resources-manually" >}}), be sure to [document your API]({{< relref "/controller/api-management/manage-apis.md#document-your-api" >}}).
+   {{< /tip >}}
+
+1. [Create a Published API]({{< relref "/controller/api-management/manage-apis.md#publish-an-api" >}}).
+
+   {{< important >}}
+You must create an App Component when creating a Published API. You'll [assign routes]({{< relref "/controller/api-management/manage-apis.md#define-the-routing-rules" >}}) from the API Definition to this Component.
+
+Both the Published API and the associated App Component must be successfully created before you can create a Dev Portal.
+
+See [Manage Your APIs]({{< relref "/controller/api-management/manage-apis.md" >}}) and the [troubleshooting](#troubleshoot-dev-portal-publication) section below for more information.
+
+You also have the option to associate Dev Portal(s) in the *Deployment* page when you [Add a Published API]({{< relref "/controller/api-management/manage-apis.md#add-a-published-api" >}}). If you already have a Published API and you want to create a new Dev Portal to host it, complete the tasks described in this guide.
+
+   {{< /important >}}
+
+## Create a Developer Portal
+
+To create a Dev Portal, take the steps below:
+
+1. Open the NGINX Controller user interface and log in.
+2. Select the NGINX Controller menu icon, then select Services.
+3. On the **Services** menu, select APIs.
+4. On the APIs page, select **Create Dev Portal** from the Quick Actions menu.
+
+   {{< tip >}}
+If you want to connect one or more Dev Portals to an existing Published API, you should select the **Edit Published API** option. The API Documentation will be published to the selected Dev Portal(s). 
Refer to the [Define the Published API Deployment]({{< relref "/controller/api-management/manage-apis.md#define-the-published-api-deployment" >}}) section for more information and instructions. + {{< /tip >}} + +### Configure the Developer Portal + +On the **Create Dev Portal** *Configuration* page: + +1. Provide a resource name for the Dev Portal. +2. (Optional) Provide a display name, description, and tags. +3. Select the desired Environment, or select Create to create a new resource. +4. Select a Gateway, or select Create to create a new resource. +5. Select the Published API(s) that you want to host in the Dev Portal. +6. Select **Next** to move to the **Themes** page. + +### Define the Dev Portal Theme + +On the **Create Dev Portal** *Themes* page: + +1. Select **Brand** to define the following elements: + + - **Brand Name**, + - **Logo**, and + - **Favicon** + +2. Select **Next**. +3. Set the **Colors** for theme elements. Then, select **Next**. +4. Set the **Fonts** for the theme. Then, select **Next**. +5. Review the **API Spec**, then select **Submit**. + +> You should now be able to access the Dev Portal via the hostname and port that you assigned to the Dev Portal Gateway. + +## View, Edit, or Delete a Developer Portal + +To view, edit, or delete a Dev Portal, take the steps below: + +1. Open the NGINX Controller user interface and log in. +2. Select the NGINX Controller menu icon, then select Services. +3. On the **Services** menu, select APIs. +4. On the APIs menu, select **Dev Portals**. + +To **edit** a Dev Portal: + +1. Select the **Edit** icon for the Dev Portal. +2. Edit the Dev Portal as desired. + + - Select **Configure** to update the Dev Portal configurations, including the Environment, Gateway, and Published API. + - Select **Brand** to customize the **Brand Name** and to upload a **Logo** and **Favicon**. + - Select **Color** to customize the Dev Portal theme colors. + - Select **Fonts** to customize the Dev Portal theme fonts. + +3. 
Select **Submit** to save your changes. + +To **delete** a Dev Portal, select the **Delete** icon. Then, select **Delete** in the confirmation prompt window. + +## Troubleshoot Dev Portal Publication + +If the Gateway that the Dev Portal is associated with is in an error state, publishing your Dev Portal will fail. You won't necessarily see an error in the Dev Portals section of the user interface when this happens, but configuration errors in these resources will impact Dev Portal functionality. + +- App Component configuration errors are displayed only in the App Component section of the user interface. +- Published API configuration errors are displayed in the Published APIs section of the user interface, as well as in the Dev Portal. +- Dev Portal configuration errors are not displayed in the NGINX Controller user interface. + +If your Dev Portal failed to publish, check the status of the Gateway first; resolve any issues with the Gateway, then try publishing the Dev Portal again. +If the issue persists, check the other resources for configuration errors. + +## What's Next + +- [Learn about Policies]({{< relref "available-policies.md" >}}) +- [Manage Your APIs]({{< relref "manage-apis.md" >}}) + +{{< versions "3.7" "3.18" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} diff --git a/content/controller/api/_index.md b/content/controller/api/_index.md new file mode 100644 index 000000000..fbce5140c --- /dev/null +++ b/content/controller/api/_index.md @@ -0,0 +1,9 @@ +--- +description: Learn how to use the F5 NGINX Controller REST API. +menu: + docs: + parent: NGINX Controller +title: API Reference +weight: 210 +url: /nginx-controller/api/reference/ +--- diff --git a/content/controller/api/overview.md b/content/controller/api/overview.md new file mode 100644 index 000000000..58455c2ff --- /dev/null +++ b/content/controller/api/overview.md @@ -0,0 +1,124 @@ +--- +description: Provides information about the F5 NGINX Controller API. 
+docs: DOCS-343 +doctypes: +- concept +layout: docs +tags: +- docs +title: API Overview +toc: true +weight: 10 +--- + +## Introduction + +The F5 NGINX Controller API is a [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) API that allows you to programmatically manage your NGINX Plus data planes. + +NGINX Controller follows an "API-first" approach, which means that all NGINX Controller functionality is exclusively exposed through declarative and resource-oriented APIs. Even the user interface (user interface) uses our REST API! You'll find examples of REST request bodies in the user interface. You can rest assured that the example you see is correct, because that is the call that the user interface is going to make to apply your requested configuration. + +## Encoding + +All NGINX Controller API endpoints expect and return JSON-formatted data by default. +When appropriate, the API accepts and returns other media types, such as file uploads or downloads. + +All JSON-formatted data is expected to be encoded using UTF-8 as described by the [IETF JSON Spec](https://tools.ietf.org/html/rfc8259). +If you do not specify a specific media type in an API call, then the API defaults to `"application/json"`. If you specify multiple acceptable media types, the first type that the API supports is chosen for the response. In the event of a request for a media type that the API doesn't support, it returns a "415 Unsupported Media Type" response. + +## Object Model + +The NGINX Controller API -- as well as the user interface and the product documentation -- is organized into four top-level areas: + +- **Analytics**: Enables data visualization for NGINX Controller. +- **Infrastructure**: Lets you manage your NGINX Plus instances and certain aspects of the host machines on which NGINX Controller and NGINX Plus instances run. +- **Platform**: Lets you manage NGINX Controller options and configurations, including Users, Roles, Licenses, and Global Settings. 
+- **Services**: Lets you manage your applications and APIs. + +The diagrams below demonstrate how the different objects at the Service level relate to each other: + +1. All Service objects are part of an Environment. +1. Gateways and Certs can be defined at the Environment level --or-- at the Component Level. The diagram below shows an example of how traffic flows through a Gateway to an App. +1. Components are child objects that represent the back-end server(s) that host your App or API. + {{}}A Component can represent an application **or** an API. The same Component cannot be used for both App Delivery and API Management.{{}} +1. Certs can be added to a Gateway or to an individual Component. + +{{< img src="/ctlr/img/services-object-model-example.png" alt="Diagram showing the relationship of objects in an Environment within the Services area." >}} +{{< img src="/ctlr/img/traffic-flow-example-1.png" alt="Example traffic flow through a gateway to app components that represent a back-end application. Certs can be configured at the gateway or at the app component level." >}} + +### Permissions + +Access to each of these areas is determined by your User Role. Roles grant Users access to specific Environments; Role permission levels define what content you can see ("Read" access) and interact with ("Write" access). Users with Roles that contain "Full" access can interact with all areas. + +The diagram below shows a sample System Administrator (or, "SysAdmin") workflow. The SysAdmin user has full administrator permissions, which allows creation of objects in all areas. In this workflow, the SysAdmin user creates an Environment; then creates a Role that has permission to interact with objects in that Environment; and, finally, creates a User. The Role grants the User access to objects in the Environment. + +{{< img src="/ctlr/img/netops-workflow.png" alt="Example System Admin workflow" >}} + +The diagram below shows a sample deployment workflow. 
In this workflow, the user - a Deployment Manager - has read and write access to objects in one specific Environment, but no access to other Environments. Within the allowed Environment, the user can create objects or select from objects that were added by a system administrator. In this workflow, the Deployment Manager creates an App and an App Component. Associated objects like Certs and Gateways can be added -- or selected from a list -- when adding the App Component. The configs for load balancing, monitoring, and URI redirects are defined as part of the App Component as well. + +{{< img src="/ctlr/img/devops-workflow-simple.png" alt="Example deployment workflow" >}} + +{{< see-also >}} + +- [Managing Roles & Users]({{< relref "/controller/platform/access-management/manage-users.md" >}}) + +{{< /see-also >}} + +## Authentication + +The NGINX Controller API uses session cookies to authenticate requests. The session cookie is returned in response to a `GET /api/v1/platform/login` request. See the Login endpoint in the [NGINX Controller API Reference]({{< relref "/controller/api/_index.md" >}}) documentation for information about session cookie timeouts and invalidation. + +{{< tip >}} +You can send a GET request to the login endpoint to find the status of the session token. +{{< /tip >}} + +For example: + +- Login and capture the session cookie: + + ```curl + curl -c cookie.txt -X POST --url 'https://198.51.100.10/api/v1/platform/login' --header 'Content-Type: application/json' --data '{"credentials": {"type": "BASIC","username": "arthur@arthurdent.net","password": "Towel$123"}}' + ``` + +- Use the session cookie to authenticate and get the session status: + + ```curl + curl -b cookie.txt -c cookie.txt -X GET --url 'https://198.51.100.10/api/v1/platform/login' + ``` + + +## Resource Types + +The NGINX Controller API contains two types of resources: immediately consistent and eventually consistent. + +Immediately consistent resources are synchronous. 
For these resources, any changes you make will be applied at the time the request is received. Requests to modify state using an API write operation (POST, PUT, PATCH or DELETE) result in the transmitted data being stored by the server as state. There is no need to check for progress, success, or failure using an API read operation (GET) for these resources. The original response should communicate if the request was successful. + +Eventually consistent resources are asynchronous. For these resources, any changes you request will be applied over time. Requests to modify state using an API write operation (POST, PUT, PATCH or DELETE) result in the transmitted data being stored by the server and messages or events being generated to eventually apply this state. You may check for progress, success, or failure using an API read operation (GET). The original response communicates that the data resulting in instructions was understood by the system. + +## Resource Properties + +All NGINX Controller API resources contain the following properties: + +```json +{ + "metadata": { + }, + "desiredState": { + }, + "currentStatus": { + } +} +``` + +The `desiredState` property is a representation of the state that you want to apply to the system. The properties within `desiredState` are the API representation of data. While changes to `desiredState` may trigger eventually consistent operations, the object itself is "immediately consistent". Consumers of the API can "read their own writes" and should always be able to retrieve the current desired state, no matter where the system is in the process of applying the state change. + +The `currentStatus` property represents the current state of the system. Its purpose is to communicate the progress of achieving eventual consistency to the API consumer. As such, `currentStatus` is a read-only property. + +## Versioning + +The introduction of backwards-incompatible changes to the NGINX Controller API constitutes a major version change. 
This will be represented in the NGINX Controller API version string. For example, to use a `v2` API, you would use `https:///api/v2`. + +When any NGINX Controller component requires a version change, we will release a new version of the entire API. In other words, you won't see a mix of `v1` and `v2` objects in the same API. + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.18" "latest" "apimvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/api/reference/ctlr-adc-api.md b/content/controller/api/reference/ctlr-adc-api.md new file mode 100644 index 000000000..94af14cb5 --- /dev/null +++ b/content/controller/api/reference/ctlr-adc-api.md @@ -0,0 +1,16 @@ +--- +description: + Represents the state of the F5 NGINX Controller Application Delivery REST + API. +docs: DOCS-1280 +doctypes: + - reference +type: redoc +tags: + - api +title: ADC API +toc: false +weight: 300 +--- + +{{< openapi spec="/controller/api/reference/ctlr-adc-openapi.json" >}} diff --git a/content/controller/api/reference/ctlr-adc-openapi.json b/content/controller/api/reference/ctlr-adc-openapi.json new file mode 100644 index 000000000..019f99da3 --- /dev/null +++ b/content/controller/api/reference/ctlr-adc-openapi.json @@ -0,0 +1,13365 @@ +{ + "openapi": "3.0.0", + "info":{ + "title": "NGINX Controller Application Delivery REST API", + "version": "v1", + "description": "Use the NGINX Controller Application Delivery module to configure, secure, monitor, and troubleshoot load balancing for your applications." + }, + "servers": [ + { + "description": "NGINX Controller API", + "url": "https://{{CONTROLLER_FQDN}}/api/v1" + }], + "tags": [ + { + "name": "Instances", + "description": "Use the Instance API to manage NGINX Controller Instance resources." + }, + { + "name": "Instance Groups", + "description": "Use the Instance Groups API to manage a set of instances that can be used for scaling and high availability. 
The Instance Groups API is a beta feature and is not recommended for use in production environments." + }, + { + "name": "Instance Templates", + "description": "Use the Instance Templates API to manage templates that can be used to deploy new NGINX Plus instances." + }, + { + "name": "Locations", + "description": "Use the Locations API to manage the deployment locations for NGINX Controller." + }, + { + "name": "Integrations", + "description": "Use the Integrations API to manage integrated cloud provider accounts." + }, + { + "name": "Environments", + "description": "Use the Environments API to manage your Application Environments." + }, + { + "name": "Certs", + "description": "Use the Certs API to manage the certificates used to secure your App traffic." + }, + { + "name": "Gateways", + "description": "Use the Gateways API to manage Gateway resources." + }, + { + "name": "Apps", + "description": "Use the Apps API to manage App resources." + }, + { + "name": "Components", + "description": "Use the Components API to define child components (for example, microservices) for your Apps." + }, + { + "name": "API Definitions", + "description": "Use the API Definitions API to manage your APIs by using the NGINX Controller API Management module." + }, + { + "name": "DevPortals", + "description": "Use the DevPortals API to manage DevPortals resources." + }, + { + "name": "Identity Providers", + "description": "Use the Identity Provider API to manage Identity providers in the API-M Credential Management partition." + }, + { + "name": "Published APIs", + "description": "Use the Published APIs API to manage your APIs by using the API Management module." + }, + { + "name": "Error Sets", + "description": "Use the Error Sets API to view the default predefined Error Sets." 
+ }, + { + "name": "Services", + "description": "Use the Services API to request a metadata list of a desired resource within a single environment or across all environments.\nSupported resources:\n - published-apis\n" + }, + { + "name": "Strategies", + "description": "Manage Security Strategies for your Apps and APIs. The current supported strategy is an NGINX App Protect Strategy (beta)." + }, + { + "name": "Policies", + "description": "Manage Security Policies for your Apps and APIs. The current supported policy is an NGINX App Protect Policy (beta)." + } + ], + "paths": { + "/infrastructure/instance-groups": { + "get": { + "tags": [ + "Instance Groups" + ], + "summary": "List Instance Groups", + "description": "Returns an unfiltered list of all Instance Group resources.", + "operationId": "listInstanceGroups", + "responses": { + "200": { + "description": "Successfully retieved a list of all the configured Instance Groups.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListInstanceGroupsResponse" + }, + "example": { + "value": { + "items": [ + { + "currentStatus": { + "instanceRefs": [], + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + }, + "desiredState": {}, + "metadata": { + "createTime": "2020-05-13T09:29:49.464273Z", + "description": "us-west-2 autoscale group", + "displayName": "aws-autoscale-group", + "kind": "instance-group", + "links": { + "rel": "/api/v1/infrastructure/instance-groups/amz-us-west-2-as-group" + }, + "name": "amz-us-west-2-as-group", + "uid": "802ef1f8-9105-474a-b9a7-599837efd6b4", + "updateTime": "2020-05-13T09:29:49.464273Z" + } + }, + { + "currentStatus": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/eks-cluster/instances/30dc361a3729" + }, + { + "ref": 
"/infrastructure/locations/eks-cluster/instances/01a9eead50e5" + } + ], + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + }, + "desiredState": {}, + "metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "K8S NGINX+ deployment", + "displayName": "k8s-nginx-deploy", + "kind": "instance-group", + "links": { + "rel": "/api/v1/infrastructure/instance-groups/k8s-nginx-deploy" + }, + "name": "k8s-nginx-deploy", + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + } + } + ] + } + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + } + } + }, + "post": { + "tags": [ + "Instance Groups" + ], + "summary": "Create an Instance Group", + "description": "Creates an new Instance Group resource.", + "operationId": "addInstanceGroup", + "requestBody": { + "description": "Defines the Instance Group resource to be added.", + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceGroup" + }, + "example": { + "value": { + "metadata": { + "name": "k8s-nginx-deploy", + "displayName": "K8S NGINX+ deployment", + "description": "k8s-nginx-deploy" + }, + "desiredState": {} + } + } + } + } + }, + "responses": { + "202": { + "description": "The Instance Group resource has been accepted for creation. 
The Instance Group will be created after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceGroupResponse" + }, + "example": { + "value": { + "metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "K8S NGINX+ deployment", + "displayName": "k8s-nginx-deploy", + "kind": "instance-group", + "links": { + "rel": "/api/v1/infrastructure/instance-groups/k8s-nginx-deploy" + }, + "name": "k8s-nginx-deploy", + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + }, + "desiredState": {}, + "currentStatus": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/eks-cluster/instances/30dc361a3729" + }, + { + "ref": "/infrastructure/locations/eks-cluster/instances/01a9eead50e5" + } + ], + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + } + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "409": { + "$ref": "#/components/responses/Conflict" + } + } + } + }, + "/infrastructure/instance-groups/{instanceGroupName}": { + "get": { + "tags": [ + "Instance Groups" + ], + "summary": "Get an Instance Group", + "description": "Returns information about a specified Instance Groupe resource.", + "operationId": "getInstanceGroup", + "parameters": [ + { + "$ref": "#/components/parameters/InstanceGroupName" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the requested Instance Group resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceGroupResponse" + }, + "example": { + "value": { + "metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "K8S NGINX+ deployment", + 
"displayName": "k8s-nginx-deploy", + "kind": "instance-group", + "links": { + "rel": "/api/v1/infrastructure/instance-groups/k8s-nginx-deploy" + }, + "name": "k8s-nginx-deploy", + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + }, + "desiredState": {}, + "currentStatus": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/eks-cluster/instances/30dc361a3729" + }, + { + "ref": "/infrastructure/locations/eks-cluster/instances/01a9eead50e5" + } + ], + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + } + } + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "404": { + "$ref": "#/components/responses/NotFound" + } + } + }, + "put": { + "tags": [ + "Instance Groups" + ], + "summary": "Upsert an Instance Group", + "description": "Creates a new Instance Group resource or updates an existing Instance Group resource.", + "operationId": "upsertInstanceGroup", + "parameters": [ + { + "$ref": "#/components/parameters/InstanceGroupName" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceGroup" + }, + "example": { + "value": { + "metadata": { + "name": "k8s-nginx-deploy", + "displayName": "K8S NGINX+ deployment", + "description": "k8s-nginx-deploy" + }, + "desiredState": {} + } + } + } + }, + "required": true + }, + "responses": { + "202": { + "description": "The Instance Group resource has been accepted for creation or update.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceGroupResponse" + }, + "example": { + "value": { + "metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "K8S NGINX+ deployment", + "displayName": "k8s-nginx-deploy", + "kind": 
"instance-group", + "links": { + "rel": "/api/v1/infrastructure/instance-groups/k8s-nginx-deploy" + }, + "name": "k8s-nginx-deploy", + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + }, + "desiredState": {}, + "currentStatus": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/eks-cluster/instances/30dc361a3729" + }, + { + "ref": "/infrastructure/locations/eks-cluster/instances/01a9eead50e5" + } + ], + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + } + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + } + } + }, + "delete": { + "tags": [ + "Instance Groups" + ], + "summary": "Delete an Instance Group", + "description": "Deletes the specified Instance Group resource.", + "operationId": "deleteInstanceGroup", + "parameters": [ + { + "$ref": "#/components/parameters/InstanceGroupName" + } + ], + "responses": { + "204": { + "description": "Successfully deleted the specified Instance Group resource." 
+ }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "404": { + "$ref": "#/components/responses/NotFound" + }, + "409": { + "$ref": "#/components/responses/Conflict" + } + } + } + }, + "/infrastructure/locations": { + "get": { + "tags": [ + "Locations" + ], + "summary": "List all Locations", + "description": "Returns a list of all Locations.", + "operationId": "listLocations", + "responses": { + "200": { + "description": "Successfully retrieved a list of all of the configured Locations.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListLocationResponse" + }, + "example": { + "value": { + "items": [ + { + "currentStatus": { + "type": "OTHER_LOCATION" + }, + "desiredState": { + "type": "OTHER_LOCATION" + }, + "metadata": { + "createTime": "2020-05-13T09:29:49.464273Z", + "description": "Location for instances where location has not been specified", + "displayName": "Unspecified (default)", + "kind": "location", + "links": { + "rel": "/api/v1/infrastructure/locations/unspecified" + }, + "name": "unspecified", + "tags": [ + "default" + ], + "uid": "802ef1f8-9105-474a-b9a7-599837efd6b4", + "updateTime": "2020-05-13T09:29:49.464273Z" + } + }, + { + "currentStatus": { + "type": "OTHER_LOCATION" + }, + "desiredState": { + "type": "OTHER_LOCATION" + }, + "metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "Other Location for managing instances", + "displayName": "OtherLocation-1", + "kind": "location", + "links": { + "rel": "/api/v1/infrastructure/locations/my-other-location" + }, + "name": "my-other-location", + "tags": [ + "dev", + "prod" + ], + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + } + } + ] + } + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + } + } + }, + "post": { + "tags": [ + "Locations" + ], + "summary": "Create a Location", + "description": "Creates a new Location resource.", + "operationId": 
"addLocation", + "requestBody": { + "description": "Defines the Location resource to be added.", + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Location" + }, + "examples": { + "OTHER_LOCATION": { + "$ref": "#/components/examples/OtherLocationRequest" + }, + "AWS_LOCATION": { + "$ref": "#/components/examples/AWSLocationRequest" + }, + "AZURE_LOCATION": { + "$ref": "#/components/examples/AzureLocationRequest" + } + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the requested Location.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetLocationResponse" + }, + "examples": { + "OTHER_LOCATION": { + "$ref": "#/components/examples/OtherLocationResponse" + }, + "AWS_LOCATION": { + "$ref": "#/components/examples/AWSLocationResponse" + }, + "AZURE_LOCATION": { + "$ref": "#/components/examples/AzureLocationResponse" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "405": { + "$ref": "#/components/responses/NotAllowed" + }, + "409": { + "$ref": "#/components/responses/Conflict" + } + } + } + }, + "/infrastructure/locations/{locationName}": { + "get": { + "tags": [ + "Locations" + ], + "summary": "Get a Location", + "description": "Returns information about a specified Location resource.", + "operationId": "getLocation", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the requested Location resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetLocationResponse" + }, + "examples": { + "OTHER_LOCATION": { + "$ref": "#/components/examples/OtherLocationResponse" + }, + "AWS_LOCATION": { + "$ref": "#/components/examples/AWSLocationResponse" + }, + "AZURE_LOCATION": { + "$ref": 
"#/components/examples/AzureLocationResponse" + } + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "404": { + "$ref": "#/components/responses/NotFound" + } + } + }, + "put": { + "tags": [ + "Locations" + ], + "summary": "Upsert a Location", + "description": "Creates a new Location resource or updates an existing Location resource.", + "operationId": "upsertLocation", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Location" + }, + "examples": { + "OTHER_LOCATION": { + "$ref": "#/components/examples/OtherLocationRequest" + }, + "AWS_LOCATION": { + "$ref": "#/components/examples/AWSLocationRequest" + }, + "AZURE_LOCATION": { + "$ref": "#/components/examples/AzureLocationRequest" + } + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successfully received the request to update the specified Location resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetLocationResponse" + }, + "examples": { + "OTHER_LOCATION": { + "$ref": "#/components/examples/OtherLocationResponse" + }, + "AWS_LOCATION": { + "$ref": "#/components/examples/AWSLocationResponse" + }, + "AZURE_LOCATION": { + "$ref": "#/components/examples/AzureLocationResponse" + } + } + } + } + }, + "201": { + "description": "Successfully updated the specified Location resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetLocationResponse" + }, + "examples": { + "OTHER_LOCATION": { + "$ref": "#/components/examples/OtherLocationResponse" + }, + "AWS_LOCATION": { + "$ref": "#/components/examples/AWSLocationResponse" + }, + "AZURE_LOCATION": { + "$ref": "#/components/examples/AzureLocationResponse" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": 
"#/components/responses/Unauthorized" + }, + "405": { + "$ref": "#/components/responses/NotAllowed" + } + } + }, + "delete": { + "tags": [ + "Locations" + ], + "summary": "Delete a Location", + "description": "Deletes the specified Location resource.", + "operationId": "deleteLocation", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + } + ], + "responses": { + "204": { + "description": "Successfully deleted the specified Location resource." + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "404": { + "$ref": "#/components/responses/NotFound" + }, + "405": { + "$ref": "#/components/responses/NotAllowed" + }, + "409": { + "$ref": "#/components/responses/Conflict" + } + } + } + }, + "/infrastructure/locations/{locationName}/instances": { + "get": { + "tags": [ + "Instances" + ], + "summary": "List all Instances in a Location", + "description": "Returns the status and metadata for all of the Instances in the specified Location.", + "operationId": "listInstances", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + } + ], + "responses": { + "200": { + "description": "Successfully returned the status and metadata for all of the Instances in the specified Location.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListInstanceResponse" + }, + "examples": { + "INSTANCES": { + "$ref": "#/components/examples/ListInstanceResponse" + } + } + } + } + } + } + }, + "post": { + "tags": [ + "Instances" + ], + "summary": "Create an Instance", + "description": "Creates a new Instance resource.", + "operationId": "createInstance", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceRequest" + }, + "examples": { + "AWS_INSTANCE": { + "$ref": "#/components/examples/AWSInstanceRequest" + }, + "AZURE_INSTANCE": { + "$ref": 
"#/components/examples/AzureInstanceRequest" + } + } + } + } + }, + "responses": { + "202": { + "description": "The Instance resource has been accepted for creation. The Instance will be created after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceResponse" + }, + "examples": { + "AWS_INSTANCE": { + "$ref": "#/components/examples/AWSInstance" + } + } + } + } + }, + "400": { + "description": "Bad input parameter or URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "message": "Error creating the instance: could not parse the request payload. Check the format of the request, then try again.", + "code": 120647 + } + } + } + }, + "409": { + "description": "The request failed due to a conflict with an existing Instance resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "message": "Error creating the instance: the instance already exists. 
Use a unique name for the instance, then try again.", + "code": 120652 + } + } + } + } + } + } + }, + "/infrastructure/locations/{locationName}/instances/{instanceName}": { + "get": { + "tags": [ + "Instances" + ], + "summary": "Get an Instance", + "description": "Returns the status and metadata for a single Instance.", + "operationId": "getInstance", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + }, + { + "$ref": "#/components/parameters/InstanceName" + } + ], + "responses": { + "200": { + "description": "Successfully returned the details for an Instance.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceResponse" + }, + "examples": { + "OTHER_INSTANCE": { + "$ref": "#/components/examples/OtherInstance" + }, + "AWS_INSTANCE": { + "$ref": "#/components/examples/AWSInstance" + }, + "AZURE_INSTANCE": { + "$ref": "#/components/examples/AzureInstance" + } + } + } + } + }, + "404": { + "description": "Instance not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "message": "Error getting the instance: the specified instance does not exist. 
Check the instance name, then try again.", + "code": 120603 + } + } + } + } + } + }, + "put": { + "tags": [ + "Instances" + ], + "summary": "Update an Instance", + "description": "Updates the description or display name of an existing Instance.", + "operationId": "updateInstance", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + }, + { + "$ref": "#/components/parameters/InstanceName" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceRequest" + }, + "examples": { + "OTHER_INSTANCE": { + "$ref": "#/components/examples/InstanceUpdateRequest" + }, + "AWS_INSTANCE": { + "$ref": "#/components/examples/InstanceUpdateRequest" + }, + "AZURE_INSTANCE": { + "$ref": "#/components/examples/InstanceUpdateRequest" + } + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the existing Instance.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceResponse" + }, + "examples": { + "OTHER_INSTANCE": { + "$ref": "#/components/examples/OtherInstance" + }, + "AWS_INSTANCE": { + "$ref": "#/components/examples/AWSInstance" + }, + "AZURE_INSTANCE": { + "$ref": "#/components/examples/AzureInstance" + } + } + } + } + }, + "400": { + "description": "Bad input parameter or URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "message": "Error updating the instance: could not parse the request payload. Check the format of the request, then try again.", + "code": 120612 + } + } + } + }, + "404": { + "description": "Instance not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "message": "Error updating the instance: the specified instance does not exist. 
Check the instance name, then try again.", + "code": 120614 + } + } + } + } + } + }, + "delete": { + "tags": [ + "Instances" + ], + "summary": "Delete an Instance", + "operationId": "deleteInstance", + "description": "Deletes the specified Instance.", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + }, + { + "$ref": "#/components/parameters/InstanceName" + } + ], + "responses": { + "202": { + "description": "Request for delete accepted", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceResponse" + } + } + } + }, + "204": { + "description": "Successfully deleted the Instance. No content is returned." + }, + "404": { + "description": "Instance not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "message": "Error deleting the instance: the specified instance does not exist. Check the instance name, then try again.", + "code": 120609 + } + } + } + }, + "409": { + "description": "Failed to delete the requested Instance resource.\n\nThe Instance is referenced by another resource. Remove the references to the Instance, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "message": "Error deleting the instance: the NGINX instance is being configured. Try again later. 
If the problem persists, contact the system administrator.", + "code": 120640 + } + } + } + } + } + } + }, + "/infrastructure/locations/{locationName}/instance-templates": { + "get": { + "tags": [ + "Instance Templates" + ], + "summary": "List Instance Templates", + "description": "Returns an unfiltered list of all Instance Template resources in the specified Location.", + "operationId": "listInstanceTemplates", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved a list of all Instance Template resources for the specified Location.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListInstanceTemplateResponse" + }, + "examples": { + "INSTANCE_TEMPLATES": { + "$ref": "#/components/examples/AWSListResponse" + } + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + } + } + }, + "post": { + "tags": [ + "Instance Templates" + ], + "summary": "Create an Instance Template", + "description": "Creates a new Instance Template resource.", + "operationId": "addInstanceTemplate", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + } + ], + "requestBody": { + "description": "Defines the Instance Template resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceTemplate" + }, + "examples": { + "AWS_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AWSRequest" + }, + "AZURE_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AzureRequestWithMarketplaceImageAndUsingExistingNic" + } + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the requested Instance Template resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceTemplateResponse" + }, + "examples": { + "AWS_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AWSResponse" + }, + "AZURE_INSTANCE_TEMPLATE": { + 
"$ref": "#/components/examples/AzureResponse" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "409": { + "$ref": "#/components/responses/Conflict" + } + } + } + }, + "/infrastructure/locations/{locationName}/instance-templates/{instanceTemplateName}": { + "get": { + "tags": [ + "Instance Templates" + ], + "summary": "Get an Instance Template", + "description": "Gets information for the specified Instance Template resource.", + "operationId": "getInstanceTemplate", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + }, + { + "$ref": "#/components/parameters/InstanceTemplateName" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the requested Instance Template resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceTemplateResponse" + }, + "examples": { + "AWS_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AWSResponse" + }, + "AZURE_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AzureResponse" + } + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "404": { + "$ref": "#/components/responses/NotFound" + } + } + }, + "put": { + "tags": [ + "Instance Templates" + ], + "summary": "Upsert an Instance Template", + "description": "Creates a new Instance Template resource or updates an existing Instance Template resource.", + "operationId": "upsertInstanceTemplate", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + }, + { + "$ref": "#/components/parameters/InstanceTemplateName" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceTemplate" + }, + "examples": { + "AWS_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AWSRequest" + }, + "AZURE_INSTANCE_TEMPLATE": { + "$ref": 
"#/components/examples/AzureRequestWithCustomImageAndCreatingNewNicAndPublicIP" + } + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successfully updated the specified Instance Template resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceTemplateResponse" + }, + "examples": { + "AWS_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AWSResponse" + }, + "AZURE_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AzureResponse" + } + } + } + } + }, + "201": { + "description": "Successfully created the requested Instance Template resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceTemplateResponse" + }, + "examples": { + "AWS_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AWSResponse" + }, + "AZURE_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AzureResponse" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "409": { + "$ref": "#/components/responses/Conflict" + } + } + }, + "delete": { + "tags": [ + "Instance Templates" + ], + "summary": "Delete an Instance Template", + "description": "Deletes the specified Instance Template resource.", + "operationId": "deleteInstanceTemplate", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + }, + { + "$ref": "#/components/parameters/InstanceTemplateName" + } + ], + "responses": { + "204": { + "description": "Successfully deleted the specified Instance Template resource." 
+ }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "404": { + "$ref": "#/components/responses/NotFound" + } + } + } + }, + "/platform/integrations": { + "get": { + "tags": [ + "Integrations" + ], + "summary": "List all Integrations", + "description": "Returns an unfiltered list of account Integrations.", + "operationId": "listIntegrations", + "responses": { + "200": { + "description": "Successfully retrieved all Integration accounts.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListIntegrationResponse" + } + } + } + } + } + }, + "post": { + "tags": [ + "Integrations" + ], + "summary": "Create an Integration account", + "description": "Creates a new Integration account.", + "operationId": "addIntegration", + "requestBody": { + "description": "Defines the Integration account to be added.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Integration" + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the requested Integration.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetIntegrationResponse" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a conflict with an existing Integration.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/platform/integrations/{integrationName}": { + "get": { + "tags": [ + "Integrations" + ], + "summary": "Get an Integration account", + "description": "Gets information about a specific Integration account.", + "operationId": "getIntegration", + "parameters": [ + { + "$ref": "#/components/parameters/IntegrationName" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the requested Integration account.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetIntegrationResponse" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "Integration not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Integrations" + ], + "summary": "Update an Integration account", + "description": "Updates an Integration account.", + "operationId": "updateIntegration", + "parameters": [ + { + "$ref": "#/components/parameters/IntegrationName" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Integration" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successfully received the request to update the specified Integration account.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetIntegrationResponse" + } + } + } + }, + "201": { + "description": "Successfully updated the specified Integration account.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetIntegrationResponse" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Integrations" + ], + "summary": "Delete an Integration account", + "description": "Deletes the specified Integration account resource.", + "operationId": "deleteIntegration", + "parameters": [ + { + "$ref": "#/components/parameters/IntegrationName" + } + ], + "responses": { + "204": { + "description": "Successfully deleted the specified Integration resource." + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/services/environments": { + "get": { + "tags": [ + "Environments" + ], + "summary": "List all Environments", + "description": "Returns a list of all Environment resources.\n", + "operationId": "listEnvironments", + "responses": { + "200": { + "description": "Successfully retrieved a list of all Environment resources.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EnvironmentList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "post": { + "tags": [ + "Environments" + ], + "summary": "Create an Environment", + "description": "Creates a new Environment resource.\n", + "operationId": "createEnvironment", + "requestBody": { + "description": "Defines the Environment resource to create.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the specified Environment.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "202": { + "description": "The Environment resource has been accepted for creation. The Environment will be created after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a naming conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/services/environments/{environmentName}": { + "get": { + "tags": [ + "Environments" + ], + "summary": "Get an Environment", + "description": "Returns information for the specified Environment.\n", + "operationId": "getEnvironment", + "responses": { + "200": { + "description": "Successfully returned information for the specified Environment resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Environments" + ], + "summary": "Upsert an Environment", + "description": "Creates a new Environment or updates an existing Environment resource.\n", + "operationId": "updateEnvironment", + "requestBody": { + "description": "Defines the Environment to create or the updates to make to an existing Environment.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Environment resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "201": { + "description": "Successfully created the specified Environment resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "202": { + "description": "The Environment resource has been accepted for creation or update. The Environment will be created or updated after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Environments" + ], + "summary": "Delete an Environment", + "description": "Deletes the specified Environment resource.", + "operationId": "deleteEnvironment", + "responses": { + "202": { + "description": "The Environment resource has been marked for deletion. The Environment will be deleted after the underlying resources have been deleted.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "204": { + "description": "Successfully deleted the specified Environment resource. No content returned.\n" + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "Failed to delete the requested Environment resource.\n\nThe Environment contains references to other objects. 
Delete the referenced objects or remove the references, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + } + ] + }, + "/services/environments/{environmentName}/certs": { + "get": { + "tags": [ + "Certs" + ], + "summary": "List all Certs", + "description": "Returns a list of Cert metadata objects for all of the Certs in the specified environment.", + "operationId": "listCerts", + "responses": { + "200": { + "description": "Successfully retrieved a list of Certs for the specified Environment.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CertList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "post": { + "tags": [ + "Certs" + ], + "summary": "Create a Cert", + "operationId": "createCert", + "description": "Creates a new Cert resource in the specified Environment.\n", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Cert" + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the specified Cert resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CertStatus" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a naming conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the Cert.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + } + ] + }, + "/services/environments/{environmentName}/certs/{certName}": { + "get": { + "tags": [ + "Certs" + ], + "summary": "Get a Cert", + "operationId": "getCert", + "description": "Returns information for a specific Cert resource.", + "responses": { + "200": { + "description": "Successfully retrieved the requested Cert.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CertStatus" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Certs" + ], + "summary": "Upsert a Cert", + "operationId": "updateCert", + "description": "Creates a new Cert or updates an existing Cert resource.", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Cert" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Cert resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CertStatus" + } + } + } + }, + "201": { + "description": "Successfully created the requested Cert resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CertStatus" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Certs" + ], + "summary": "Delete a Cert", + "operationId": "deleteCert", + "description": "Deletes the specified Cert resource.", + "responses": { + "204": { + "description": "The specified Cert resource was successfully deleted." + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request to delete the specified Cert resource failed.\nThe Cert is referenced by active objects and cannot be deleted. Delete the referencing objects or remove the references, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the Cert.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + }, + { + "name": "certName", + "description": "The name of the Cert.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/ResourceName" + } + } + ] + }, + "/services/environments/{environmentName}/gateways": { + "get": { + "tags": [ + "Gateways" + ], + "summary": "List all Gateways", + "description": "Returns a list of all Gateways in the specified Environment.\n", + "operationId": "listGateways", + "responses": { + "200": { + "description": "Successfully retrieved a list of all Gateways for the specified Environment.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GatewayList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "post": { + "tags": [ + "Gateways" + ], + "summary": "Create a Gateway", + "description": "Creates a new Gateway resource.\n", + "operationId": "createGateway", + "requestBody": { + "description": "Defines the Gateway resource to create.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the specified Gateway resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "202": { + "description": "The Gateway resource has been accepted for creation. The Gateway will be created after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a naming conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to create a Gateway resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the Gateway resource.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + } + ] + }, + "/services/environments/{environmentName}/gateways/{gatewayName}": { + "get": { + "tags": [ + "Gateways" + ], + "summary": "Get a Gateway", + "description": "Returns information for the specified Gateway resource.\n", + "operationId": "getGateway", + "responses": { + "200": { + "description": "Successfully retrieved the requested Gateway resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Gateways" + ], + "summary": "Upsert a Gateway", + "description": "Creates a new Gateway or updates an existing Gateway resource.\n", + "operationId": "updateGateway", + "requestBody": { + "description": "Defines the Gateway resource to create or the updates to make to an existing Gateway resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Gateway resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "201": { + "description": "Successfully created the specified Gateway resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "202": { + "description": "The Gateway resource has been accepted for creation or update. The Gateway will be created or updated after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to update or create a Gateway resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Gateways" + ], + "summary": "Delete a Gateway", + "description": "Deletes the specified Gateway resource.", + "operationId": "deleteGateway", + "responses": { + "202": { + "description": "The Gateway resource has been marked for deletion. The Gateway will be deleted after the underlying resources have been deleted.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "204": { + "description": "Successfully deleted the specified Gateway resource. No content is returned.\n" + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request to delete a Gateway resource failed.\nThe Gateway is referenced by an App Component(s) and cannot be deleted.\nDelete the App Component or remove the reference, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to delete a Gateway resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the Gateway resource.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + }, + { + "name": "gatewayName", + "description": "The name of the Gateway.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/GatewayName" + } + } + ] + }, + "/services/environments/{environmentName}/apps": { + "get": { + "tags": [ + "Apps" + ], + "summary": "List all Apps", + "description": "Returns a list of all App resources.\n", + "operationId": "listApps", + "responses": { + "200": { + "description": "Successfully retrieved a list of all App resources.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AppList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "post": { + "tags": [ + "Apps" + ], + "summary": "Create an App", + "description": "Creates a new App resource.", + "operationId": "createApp", + "requestBody": { + "description": "An App.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the specified App resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "202": { + "description": "The App resource has been accepted for creation. The App will be created after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a naming conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the App.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + } + ] + }, + "/services/environments/{environmentName}/apps/{appName}": { + "get": { + "tags": [ + "Apps" + ], + "summary": "Get an App", + "description": "Gets the information for a specific App resource.\n", + "operationId": "getApp", + "responses": { + "200": { + "description": "Successfully retrieved information for the requested App resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Apps" + ], + "summary": "Upsert an App", + "description": "Creates a new App resource or updates an existing App resource.\n", + "operationId": "updateApp", + "requestBody": { + "description": "Defines the App resource to create or update.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified App resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "201": { + "description": "Successfully created the specified App resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "202": { + "description": "The App resource has been accepted for creation or update. The App will be created or updated after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Apps" + ], + "summary": "Delete an App", + "description": "Deletes the specified App resource.\n\nYou must delete all of an App's child resources before you delete the App.\n", + "operationId": "deleteApp", + "responses": { + "202": { + "description": "The App resource has been marked for deletion. 
The App will be deleted after the underlying resources have been deleted.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "204": { + "description": "Successfully deleted the specified App resource.\n" + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request to delete the specified App resource failed.\nThe App contains references to active objects and cannot be deleted. Delete the child objects or remove the references, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the App.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + }, + { + "name": "appName", + "description": "The name of the App.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/AppName" + } + } + ] + }, + "/services/environments/{environmentName}/apps/{appName}/components": { + "get": { + "tags": [ + "Components" + ], + "summary": "List all Components", + "description": "Returns a list of all of the Component resources that are contained by the specified App.\n", + "operationId": "listAppComponents", + "responses": { + "200": { + "description": "Successfully retrieved a list of Component resources.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ComponentList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + }, + "parameters": [ + { + "name": "strategyName", + "in": "query", + "description": "Filter desired component based on the strategy.", + "required": false, + "style": "form", + "explode": false, + "schema": { + "type": "string" + } + }, + { + "name": "policyName", + "in": "query", + "description": "Filter desired component based on the policy.", + "required": false, + "style": "form", + "explode": false, + "schema": { + "type": "string" + } + } + ] + }, + "post": { + "tags": [ + "Components" + ], + "summary": "Create a Component", + "description": "Creates a new Component resource.\n", + "operationId": "createAppComponent", + "requestBody": { + "description": "Defines the Component resource to create.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + }, + "examples": { + "ComponentRequest": { + "$ref": "#/components/examples/ComponentRequest" + } + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the requested Component resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + } + } + } + }, + "202": { + "description": "The Component resource has been accepted for creation. The Component will be created after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to create a Component resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the Component's parent App.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + }, + { + "name": "appName", + "description": "The name of the App that contains the Component resource.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/AppName" + } + } + ] + }, + "/services/environments/{environmentName}/apps/{appName}/components/{componentName}": { + "get": { + "tags": [ + "Components" + ], + "summary": "Get a Component", + "description": "Returns information for the specified Component.\n", + "operationId": "getAppComponent", + "responses": { + "200": { + "description": "Successfully returned the requested Component resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Components" + ], + "summary": "Upsert a Component", + "description": "Creates a new Component or updates an existing Component resource.\n", + "operationId": "updateAppComponent", + "requestBody": { + "description": "Defines the Component resource to create or update.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + }, + "examples": { + "ComponentRequest": { + "$ref": "#/components/examples/ComponentRequest" + } + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Component resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + } + } + } + }, + "201": { + "description": "Successfully created the specified Component resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + } + } + } + }, + "202": { + "description": "The Component resource has been accepted for creation or update. The Component will be created or updated after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to update or create a Component resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Components" + ], + "summary": "Delete a Component", + "description": "Deletes the specified Component resource.", + "operationId": "deleteAppComponent", + "responses": { + "202": { + "description": "Component resource has been marked for deletion. The resource will be\ndeleted after the underlying resources have been freed.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + } + } + } + }, + "204": { + "description": "Successfully deleted the specified Component. No content returned.\n" + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to delete a Component resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the Component's parent App.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + }, + { + "name": "appName", + "description": "The name of the App that contains the Component resource.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/AppName" + } + }, + { + "name": "componentName", + "description": "The name of the Component resource.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/ComponentName" + } + } + ] + }, + "/services": { + "x-f5-experimental": true, + "get": { + "tags": [ + "Services" + ], + "summary": "List the metadata for all instances of the desired resource.", + "description": "Returns a metadata list of the requested resource. The resources that can be queried is currently restricted to published-apis.\n", + "operationId": "listResources", + "responses": { + "200": { + "description": "Successfully retrieved a metadata list of the requested resource.", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceMeta" + } + } + } + } + }, + "400": { + "description": "The resource defined in the query parameters could not be found or is not yet supported. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The environment defined in the query parameters could not be found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "resource", + "in": "query", + "description": "Desired resource to list.", + "required": true, + "style": "form", + "explode": false, + "schema": { + "type": "string", + "enum": [ + "published-apis" + ] + } + }, + { + "name": "environment", + "in": "query", + "description": "Filter desired resource based on the environment.", + "required": false, + "style": "form", + "explode": false, + "schema": { + "type": "string" + } + } + ] + }, + "/services/api-definitions": { + "get": { + "tags": [ + "API Definitions" + ], + "summary": "List API Definitions", + "description": "Returns a list of API Definition resources.", + "operationId": "apiDefinitionsSearch", + "responses": { + "200": { + "description": "Successfully retrieved a list of all API Definitions.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionList" + } + } + } + } + } + } + }, + "/services/api-definitions/{apiDefinitionName}": { + "get": { + "tags": [ + "API Definitions" + ], + "summary": "Get an API Definition", + "description": "Gets information about a specified API Definition.\n", + "operationId": "apiDefinitionsGet", + "responses": { + "200": { + "description": "Successfully returned the specified API Definition resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinition" + } + } + } + }, + "404": { + "description": "The specified API Definition resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { 
+ "tags": [ + "API Definitions" + ], + "summary": "Upsert an API Definition", + "description": "Creates a new API Definition or updates an existing API Definition resource.\n", + "operationId": "apiDefinitionsPut", + "requestBody": { + "description": "Defines the API Definition resource to create, or the updates to apply to an existing API Definition resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinition" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified API Definition resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinition" + } + } + } + }, + "201": { + "description": "Successfully created the requested API Definition resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinition" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "API Definitions" + ], + "summary": "Delete an API Definition", + "description": "Deletes the specified API Definition resource.", + "operationId": "apiDefinitionsDelete", + "responses": { + "204": { + "description": "Successfully deleted the requested API Definition resource. No content is returned.\n" + }, + "404": { + "description": "The specified API Definition resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "Failed to delete the specified API Definition resource. 
Delete any referenced Published APIs or remove the references, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/apiDefinitionName" + } + ] + }, + "/services/api-definitions/{apiDefinitionName}/versions": { + "get": { + "tags": [ + "API Definition Versions" + ], + "summary": "List API Definition Versions", + "description": "Lists all Versions for the specified API Definition resource.\n", + "operationId": "definitionVersionsSearch", + "responses": { + "200": { + "description": "Successfully retrieved the list of API Definition Version resources.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersionList" + } + } + } + }, + "404": { + "description": "The specified API Definition resource was not found or does not contain any Versions.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "API Definition Versions" + ], + "summary": "Add an API Definition Version", + "description": "Creates a single new API Definition Version resource or multiple Version resources.\n", + "operationId": "definitionVersionsPutList", + "requestBody": { + "description": "Creates, updates, or deletes an API Definition Version resource. It interacts with a list of items.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersionList" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully received the request to create the API Definition Version resource(s).", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersionList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "Failed to delete the specified API Definition Version resource(s) as it has at least one dependent Published API. Delete the referenced Published API(s), then try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "API Definition Versions" + ], + "summary": "Delete API Definition Versions", + "description": "Deletes all Versions for the specified API Definition resource.\n", + "operationId": "definitionVersionsDeleteList", + "responses": { + "204": { + "description": "Successfully deleted the Versions for the requested API Definition resource. No content is returned.\n" + }, + "404": { + "description": "The specified API Definition resource was not found or does not contain any Versions.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "Failed to delete the Versions for the specified API Definition resource. 
Delete or remove any references to Published APIs, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/apiDefinitionName" + } + ] + }, + "/services/api-definitions/{apiDefinitionName}/versions/{version}": { + "get": { + "tags": [ + "API Definition Versions" + ], + "summary": "Get an API Definition Version", + "description": "Gets information about an API Definition Version resource.\n", + "operationId": "definitionVersionsGet", + "responses": { + "200": { + "description": "Successfully returned the specified API Definition Version resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersion" + } + } + } + }, + "404": { + "description": "The specified API Definition Version resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "API Definition Versions" + ], + "summary": "Upsert an API Definition Version", + "description": "Creates a single new Version resource or updates an existing API Definition Version resource.\n", + "operationId": "definitionVersionsPut", + "requestBody": { + "description": "Creates or updates an API Definition Version resource", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersion" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified API Definition Version resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersion" + } + } + } + }, + "201": { + "description": "Successfully created the requested API Definition Version resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersion" + } + } + } + }, + "400": { + 
"description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "API Definition Versions" + ], + "summary": "Delete an API Definition Version", + "description": "Deletes an API Definition Version resource.\n", + "operationId": "definitionVersionsDelete", + "responses": { + "204": { + "description": "Successfully deleted the API Definition Version resource.\n" + }, + "404": { + "description": "The specified API Definition Version resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "Failed to delete the specified API Definition resource. Delete or remove any references to Published APIs, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/apiDefinitionName" + }, + { + "$ref": "#/components/parameters/versionName" + } + ] + }, + "/services/api-definitions/{apiDefinitionName}/versions/{version}/import": { + "put": { + "tags": [ + "API Definition Version Import" + ], + "summary": "Import an API Definition Version", + "description": "Imports an API spec to the specified Version of an API Definition.\n\nUse this endpoint to import a raw API specification to define your API.\n\n- This endpoint accepts a valid OpenAPI 3 spec, formatted as valid JSON or YAML.\n- The file provided for import will be validated against the\n [OAS v3 schema](https://github.com/OAI/OpenAPI-Specification/blob/master/schemas/v3.0/schema.yaml).\n- You must specify a \"Content-Type\" header when importing an API spec.\n The endpoint accepts the following \"Content-Type\" values:\n\n - application/json\n - application/yaml\n - text/x-yaml\n - 
application/x-yaml\n - text/yaml\n", + "operationId": "definitionVersionsImport", + "requestBody": { + "content": { + "application/json": { + "schema": { + "type": "object" + } + }, + "application/yaml": { + "schema": { + "type": "string" + } + }, + "text/x-yaml": { + "schema": { + "type": "string" + } + }, + "application/x-yaml": { + "schema": { + "type": "string" + } + }, + "text/yaml": { + "schema": { + "type": "string" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully completed the API Version Import request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersion" + } + } + } + }, + "201": { + "description": "Successfully created the requested API Definition Version resource from the spec provided.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersion" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "415": { + "description": "The request body contains an unsupported content type.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/apiDefinitionName" + }, + { + "$ref": "#/components/parameters/versionName" + } + ] + }, + "/security/identity-providers": { + "get": { + "tags": [ + "Identity Providers" + ], + "summary": "List Identity Providers", + "description": "Returns a list of all Identity Provider resources.\n\n> **Note:** These resources were known as Client Groups in pre-3.x versions of NGINX Controller.\n", + "operationId": "identityProvidersSearch", + "responses": { + "200": { + "description": "Successfully retrieved a list of Identity Provider resources.", + "content": { + "application/json": { + 
"schema": { + "$ref": "#/components/schemas/IdentityProviderList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/security/identity-providers/{identityProviderName}": { + "get": { + "tags": [ + "Identity Providers" + ], + "summary": "Get an Identity Provider", + "description": "Returns the specified Identity Provider resource.", + "operationId": "identityProvidersGet", + "responses": { + "200": { + "description": "Successfully returnd the specified Identity Provider resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProvider" + } + } + } + }, + "403": { + "description": "The request failed due to insufficient privileges.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Identity Provider resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "500": { + "description": "The request cannot be processed because of an internal server error.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Identity Providers" + ], + "summary": "Upsert an Identity Provider", + "description": "Creates a new Identity Provider, or creates an existing Identity Provider resource.\n", + "operationId": "identityProvidersPut", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProvider" + }, + "examples": { + "IdentityProviderRequest": { + "$ref": "#/components/examples/IdentityProviderRequest" + } + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated 
the specified Identity Provider resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProvider" + } + } + } + }, + "201": { + "description": "Successfully created the requested Identity Provider resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProvider" + } + } + } + }, + "202": { + "description": "Successfully accepted the requested Identity Provider resource and is currently processing it.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProvider" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to update or create an Identity Provider resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Identity Providers" + ], + "summary": "Delete an Identity Provider", + "description": "Deletes the specified Identity Provider resource.", + "operationId": "identityProvidersDelete", + "responses": { + "202": { + "description": "The Identity Provider resource has been marked for deletion. The resource will be deleted after the publish/cleanup succeeds.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProvider" + } + } + } + }, + "204": { + "description": "Successfully deleted the specified Identity Provider resource. No content is returned." + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Identity Provider resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to delete an Identity Provider resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "500": { + "description": "The request cannot be processed because of an internal server error.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/identityProviderName" + } + ] + }, + "/security/identity-providers/{identityProviderName}/clients": { + "get": { + "tags": [ + "Identity Provider Clients" + ], + "summary": "List Identity Provider Clients", + "description": "Returns a list of all Identity Provider Client resources.\n", + "operationId": "identityProviderClientsSearch", + "responses": { + "200": { + "description": "Successfully retrieved a list of Identity Provider Client resources.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClientList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Identity Provider resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Identity Provider Clients" + ], + "summary": "Create Multiple Identity Provider Clients", + "description": "Creates or updates multiple Identity Provider Client resources.\n", + "operationId": "identityProviderClientsPutList", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClientList" + }, + "examples": { + "IdentityProviderClientListRequest": { + "$ref": "#/components/examples/IdentityProviderClientListRequest" + } + } + } + } + }, + "responses": { + "200": { + "description": "Successfully upserted the Identity Provider Client resources.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClientList" + } + } + } + }, + "202": { + "description": "Successfully accepted the requested Identity Provider Client resources and is currently processing it.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClientList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Identity Provider resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The Identity Provider Client resource conflicts with another resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/identityProviderName" + } + ] + }, + "/security/identity-providers/{identityProviderName}/clients/{identityProviderClientName}": { + "get": { + "tags": [ + "Identity Provider Clients" + ], + "summary": "Get an Identity Provider Client", + "description": "Returns information for the specified Identity Provider Client resource.", + "operationId": "identityProviderClientsGet", + "responses": { + "200": { + "description": "Successfully returned the specified Identity Provider Client resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClient" + } + } + } + }, + "404": { + "description": "The specified Identity Provider Client resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Identity Provider Clients" + ], + "summary": "Update an Identity Provider Client", + "description": "Updates the specified Identity Provider Client resource.", + "operationId": "identityProviderClientsPut", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClient" + }, + "examples": { + "IdentityProviderClientRequest": { + "$ref": "#/components/examples/IdentityProviderClientRequest" + } + } + } + } + }, + "responses": { + 
"200": { + "description": "Successfully updated the specified Identity Provider Client resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClient" + } + } + } + }, + "201": { + "description": "Successfully created the specified Identity Provider Client resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClient" + } + } + } + }, + "202": { + "description": "Successfully accepted the requested Identity Provider Client resource and is currently processing it.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClient" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Identity Provider resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to update or create an Identity Provider Client resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "patch": { + "tags": [ + "Identity Provider Clients" + ], + "summary": "Update an Identity Provider Client", + "description": "Updates the specified Identity Provider Client resource.", + "operationId": "identityProviderClientsPatch", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateIdentityProviderClient" + }, + "examples": { + "IdentityProviderClientPatchMetadataRequest": { + "$ref": 
"#/components/examples/IdentityProviderClientPatchMetadataRequest" + }, + "IdentityProviderClientPatchDesiredStateRequest": { + "$ref": "#/components/examples/IdentityProviderClientPatchDesiredStateRequest" + } + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Identity Provider Client resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClient" + } + } + } + }, + "202": { + "description": "Successfully accepted the request to update an Identity Provider Client resource and is currently processing it.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClient" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Identity Provider Client resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to update an Identity Provider Client resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Identity Provider Clients" + ], + "summary": "Delete an Identity Provider Client", + "description": "Deletes the specified Identity Provider Client resource.", + "operationId": "identityProviderClientsDelete", + "responses": { + "202": { + "description": "Identity Provider Client resource has been marked for deletion. 
The resource will be\ndeleted after the publish/cleanup succeeds.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClient" + } + } + } + }, + "204": { + "description": "Successfully deleted the specified Identity Provider Client resource. No content is returned." + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Identity Provider Client resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to delete an Identity Provider Client resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/identityProviderName" + }, + { + "$ref": "#/components/parameters/identityProviderClientName" + } + ] + }, + "/services/environments/{environmentName}/devportals": { + "get": { + "tags": [ + "DevPortals" + ], + "summary": "List all DevPortals", + "description": "Returns a list of all DevPortal resources.\n", + "operationId": "ListDevPortals", + "responses": { + "200": { + "description": "Successfully retrieved a list of all DevPortals resources.", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DevPortalsList" + } + } + } + } + }, + "400": { + "description": "Illegal input parameter or malformed URI specified. 
Check for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "post": { + "tags": [ + "DevPortals" + ], + "summary": "Create DevPortal", + "description": "Creates new Dev Portal resource.", + "operationId": "CreateDevPortal", + "requestBody": { + "description": "A Dev Portal.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created a specified Dev Portal resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "202": { + "description": "Dev Portal resource has been accepted for creation. A Dev Portal will be created after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "400": { + "description": "Illegal input parameter or malformed URI specified. Check for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a naming conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the DevPortal.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + } + ] + }, + "/services/environments/{environmentName}/devportals/{devPortalName}": { + "get": { + "tags": [ + "DevPortals" + ], + "summary": "Get Dev Portal", + "description": "Gets the information for a specific Dev Portal resource.\n", + "operationId": "GetDevPortal", + "responses": { + "200": { + "description": "Successfully retrieved information for the requested Dev Portal resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "400": { + "description": "Illegal input parameter or malformed URI specified. Check for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "DevPortals" + ], + "summary": "Upsert Dev Portal", + "description": "Creates a new Dev Portal resource or updates an existing Dev Portal resource.\n", + "operationId": "UpsertDevPortal", + "requestBody": { + "description": "Defines a Dev Portal resource to create or update.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Dev Portal resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "201": { + "description": "Successfully created the specified Dev Portal resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "202": { + "description": "The Dev Portal resource has been accepted for creation or update. Dev Portal will be created or updated after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "400": { + "description": "Illegal input parameter or malformed URI specified. Check for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "DevPortals" + ], + "summary": "Delete a DevPortal", + "description": "Deletes the specified Dev Portal resource.\nYou must delete all of a Dev Portal's child resources before you delete the Dev Portal.\n", + "operationId": "DeleteDevPortal", + "responses": { + "202": { + "description": "The DevPortal resource has been marked for deletion. DevPortal will be deleted after the underlying resources have been deleted.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "204": { + "description": "Successfully deleted the specified DevPortal resource.\n" + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request to delete the specified Dev Portal resource failed.\nThe Dev Portal contains references to active objects and cannot be deleted. 
Delete the child objects or remove the references, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the DevPortal.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + }, + { + "name": "devPortalName", + "description": "The name of the DevPortal.", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ] + }, + "/services/errorsets": { + "get": { + "tags": [ + "Error Sets" + ], + "summary": "List all Error Sets.", + "description": "Returns a list of all the Error Sets.", + "operationId": "listErrorSets", + "responses": { + "200": { + "description": "Successfully retrieved a list of Error Sets.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorSetList" + } + } + } + } + } + } + }, + "/services/errorsets/{errorSetName}": { + "get": { + "tags": [ + "ErrorSets" + ], + "summary": "Get an Error Set.", + "operationId": "getErrorSet", + "description": "Returns the information for a specific Error Set.", + "responses": { + "200": { + "description": "Successfully retrieved the requested Error Set.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorSet" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "errorSetName", + "description": "The name of the Error Set.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/ResourceName" + } + } + ] + }, + "/services/environments/{environmentName}/apps/{appName}/published-apis": { + "get": { + "tags": [ + "Published APIs" + ], + "summary": "List Published APIs", + "description": "Returns a list of all Published APIs for the specified Environment and App.\n", + "operationId": "listPublishedAPIs", + "responses": { + "200": { + "description": "Successfully retrieved the list of Published APIs for the specified Environment.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PublishedAPIList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/environmentName" + }, + { + "$ref": "#/components/parameters/appName" + } + ] + }, + "/services/environments/{environmentName}/apps/{appName}/published-apis/{publishedApiName}": { + "get": { + "tags": [ + "Published APIs" + ], + "summary": "Get a Published API", + "description": "Gets information about the specified Published API.\n", + "operationId": "getPublishedAPI", + "responses": { + "200": { + "description": "Successfully retrieved the specified Published API resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PublishedAPI" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Published API resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Published APIs" + ], + "summary": "Upsert a Published API", + "description": "Creates a new Published API or updates an existing Published API resource.", + "operationId": "upsertPublishedAPI", + "requestBody": { + "description": "Defines the Published API to create, or the updates to apply to an existing Published API resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PublishedAPI" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Published API resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PublishedAPI" + } + } + } + }, + "201": { + "description": "Successfully 
created the requested Published API resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PublishedAPI" + } + } + } + }, + "202": { + "description": "The request to create a Published API succeeded. The resource will be created when the configuration is complete.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PublishedAPI" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Published APIs" + ], + "summary": "Delete a Published API", + "description": "Deletes the specified Published API resource.", + "operationId": "deletePublishedAPI", + "responses": { + "202": { + "description": "Published API has been marked for deletion. The resource will be\ndeleted after the publish/cleanup succeeds.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PublishedAPI" + } + } + } + }, + "204": { + "description": "Successfully deleted the specified Published API resource. No content is returned." + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Published API resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a naming conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/environmentName" + }, + { + "$ref": "#/components/parameters/appName" + }, + { + "$ref": "#/components/parameters/publishedApiName" + } + ] + }, + "/security/strategies": { + "get": { + "tags": [ + "Strategies" + ], + "summary": "List all Strategies", + "description": "Returns a list of Strategy metadata objects for all of the Strategies.", + "operationId": "StrategyServiceList", + "responses": { + "200": { + "description": "Successfully retrieved a list of Strategies.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StrategyList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "post": { + "tags": [ + "Strategies" + ], + "summary": "Create a Strategy", + "operationId": "StrategyServicePost", + "description": "Creates a new Strategy resource.\n", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Strategy" + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the specified Strategy resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StrategyStatus" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a naming conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/security/strategies/{strategyName}": { + "get": { + "tags": [ + "Strategies" + ], + "summary": "Get a Strategy", + "operationId": "StrategyServiceGet", + "description": "Returns information for a specific Strategy resource.", + "responses": { + "200": { + "description": "Successfully retrieved the requested Strategy.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StrategyStatus" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Strategies" + ], + "summary": "Upsert a Strategy", + "operationId": "StrategyServicePut", + "description": "Creates a new Strategy or updates an existing Strategy resource.", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Strategy" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Strategy resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StrategyStatus" + } + } + } + }, + "201": { + "description": "Successfully created the requested Strategy resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StrategyStatus" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Strategies" + ], + "summary": "Delete a Strategy", + "operationId": "StrategyServiceDelete", + "description": "Deletes the specified Strategy resource.", + "responses": { + "204": { + "description": "The specified Strategy resource was successfully deleted." 
+ }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request to delete the specified Strategy resource failed.\nThe Strategy is referenced by active objects and cannot be deleted. Delete the referencing objects or remove the references, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "strategyName", + "description": "The name of the Strategy.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/ResourceName" + } + } + ] + }, + "/security/policies": { + "get": { + "tags": [ + "Policies" + ], + "summary": "List all Policies", + "description": "Returns a list of Policy metadata objects for all of the Policies.", + "operationId": "listPolicies", + "responses": { + "200": { + "description": "Successfully retrieved a list of Policies.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PolicyList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "post": { + "tags": [ + "Policies" + ], + "summary": "Create a Policy", + "operationId": "createPolicy", + "description": "Creates a new Policy resource.\n", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Policy" + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the specified Policy resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PolicyStatus" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a naming conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/security/policies/{policyName}": { + "get": { + "tags": [ + "Policies" + ], + "summary": "Get a Policy", + "operationId": "getPolicy", + "description": "Returns information for a specific Policy resource.", + "responses": { + "200": { + "description": "Successfully retrieved the requested Policy.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PolicyStatus" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Policies" + ], + "summary": "Upsert a Policy", + "operationId": "updatePolicy", + "description": "Creates a new Policy or updates an existing Policy resource.", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Policy" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Policy resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PolicyStatus" + } + } + } + }, + "201": { + "description": "Successfully created the requested Policy resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PolicyStatus" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Policies" + ], + "summary": "Delete a Policy", + "operationId": "deletePolicy", + "description": "Deletes the specified Policy resource.", + "responses": { + "204": { + "description": "The specified Policy resource was successfully deleted." + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request to delete the specified Policy resource failed.\nThe Policy is referenced by active objects and cannot be deleted. Delete the referencing objects or remove the references, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "policyName", + "description": "The name of the Policy.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/ResourceName" + } + } + ] + } + }, + "components": { + "parameters": { + "environmentName": { + "name": "environmentName", + "description": "The name of the Environment that contains the Component's parent App.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + }, + "appName": { + "name": "appName", + "description": "The name of the App that contains the Component resource.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/AppName" + } + }, + "apiDefinitionName": { + "name": "apiDefinitionName", + "in": "path", + "description": "The name of the API Definition resource.", + "required": true, + "style": "simple", + "explode": false, + "example": "shopping-app-api-def", + "schema": { + "type": "string" + } + }, + "publishedApiName": { + "name": "publishedApiName", + "in": "path", + "description": "The name of the Published API resource.", + "required": true, + "style": "simple", + "explode": false, + "example": "shopping-app-api-staging", + "schema": { + "type": "string" + } + }, + "versionName": { + "name": "version", + "in": "path", + "description": "The Version of the API Definition.", + "required": true, + "style": "simple", + "explode": false, + "example": "v1_2fd4e1c6", + "schema": { + "type": "string" + 
} + }, + "LocationName": { + "name": "locationName", + "in": "path", + "description": "The name of the Location that contains the Instance.", + "required": true, + "style": "simple", + "explode": false, + "schema": { + "type": "string" + } + }, + "InstanceName": { + "name": "instanceName", + "in": "path", + "description": "The name of the Instance.", + "required": true, + "style": "simple", + "explode": false, + "schema": { + "type": "string" + } + }, + "InstanceGroupName": { + "name": "instanceGroupName", + "in": "path", + "description": "The name of the Instance Group.", + "required": true, + "style": "simple", + "explode": false, + "schema": { + "type": "string" + } + }, + "InstanceTemplateName": { + "name": "instanceTemplateName", + "in": "path", + "description": "The name of the Instance Template resource.", + "required": true, + "style": "simple", + "explode": false, + "schema": { + "type": "string" + } + }, + "identityProviderName": { + "name": "identityProviderName", + "in": "path", + "description": "The name of the Identity Provider.", + "required": true, + "style": "simple", + "explode": false, + "schema": { + "type": "string" + } + }, + "identityProviderClientName": { + "name": "identityProviderClientName", + "in": "path", + "description": "The name of the Identity Provider Client.", + "required": true, + "style": "simple", + "explode": false, + "schema": { + "type": "string" + } + }, + "IntegrationName": { + "name": "integrationName", + "in": "path", + "description": "The name of the Integration resource.", + "required": true, + "schema": { + "type": "string" + } + } + }, + "schemas": { + "ResourceName": { + "type": "string", + "description": "The name of a resource.", + "example": "production" + }, + "LocationState": { + "oneOf": [ + { + "$ref": "#/components/schemas/OtherLocation" + }, + { + "$ref": "#/components/schemas/AWSLocation" + }, + { + "$ref": "#/components/schemas/AzureLocation" + }, + { + "$ref": "#/components/schemas/VSphereLocation" + } + 
], + "discriminator": { + "propertyName": "type", + "mapping": { + "OTHER_LOCATION": "#/components/schemas/OtherLocation", + "AWS_LOCATION": "#/components/schemas/AWSLocation", + "AZURE_LOCATION": "#/components/schemas/AzureLocation", + "VSPHERE_LOCATION": "#/components/schemas/VSphereLocation" + } + } + }, + "OtherLocation": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "description": "The location type. Select the environment where you want to create the location. \nTo create a location that’s not specific to any cloud environment, select OTHER_LOCATION.\n", + "enum": [ + "OTHER_LOCATION" + ] + } + } + }, + "AWSLocation": { + "type": "object", + "required": [ + "vpcID", + "region", + "integrationRef", + "type" + ], + "properties": { + "type": { + "type": "string", + "description": "The location type. Select the environment where you want to create the location. \nTo create a location that’s specific to the AWS cloud environment, select AWS_LOCATION.\n", + "enum": [ + "AWS_LOCATION" + ] + }, + "region": { + "description": "The AWS region.\n", + "type": "string" + }, + "vpcID": { + "type": "string", + "description": "The vpcID of the AWS Virtual Private Cloud (VPC) where new Instances created under this location should reside.\nThe VPC must be in the specified AWS region.\n" + }, + "integrationRef": { + "$ref": "#/components/schemas/ResourceRef" + } + } + }, + "AzureLocation": { + "type": "object", + "required": [ + "type", + "region", + "resourceGroup", + "subscriptionID", + "integrationRef" + ], + "properties": { + "type": { + "type": "string", + "description": "The location type.", + "enum": [ + "AZURE_LOCATION" + ] + }, + "region": { + "description": "The Azure region.", + "type": "string" + }, + "resourceGroup": { + "type": "string", + "description": "The name of the resourceGroup." 
+ }, + "subscriptionID": { + "type": "string", + "description": "The unique alphanumeric string that identifies the Azure subscription." + }, + "integrationRef": { + "description": "Integration ref.", + "$ref": "#/components/schemas/ResourceRef" + } + } + }, + "VSphereLocation": { + "type": "object", + "required": [ + "type", + "datacenter", + "integrationRef" + ], + "properties": { + "type": { + "type": "string", + "description": "The location type.", + "enum": [ + "VSPHERE_LOCATION" + ] + }, + "datacenter": { + "description": "A VSphere data center.", + "type": "string" + }, + "folder": { + "type": "string", + "description": "The name of the VM folder that you want to add your instance to. If no name is provided, the instance will be created in the folder containing the VM template your instance is cloned from." + }, + "integrationRef": { + "description": "Integration ref.", + "$ref": "#/components/schemas/ResourceRef" + } + } + }, + "Location": { + "required": [ + "metadata", + "desiredState" + ], + "type": "object", + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/LocationState" + }, + "currentStatus": { + "$ref": "#/components/schemas/LocationState" + } + } + }, + "GetLocationResponse": { + "allOf": [ + { + "$ref": "#/components/schemas/Location" + } + ] + }, + "ListLocationResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Location" + } + } + } + }, + "AppList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "App": { + "type": "object", + "description": "An App is a collection of Components.", + "required": [ + "metadata" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "x-f5-experimental": true, + "type": "object" + }, + "currentStatus": { + 
"$ref": "#/components/schemas/AppCurrentStatus" + } + } + }, + "AppCurrentStatus": { + "type": "object", + "properties": { + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "componentRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + } + }, + "Component": { + "description": "A component represents the processing –- reverse proxying, rate limiting, security policy enforcement, header rewrites, etc.\n–- of traffic associated with a logical part (for example, microservice) of an application/API. It also defines the subsequent\nload balancing of traffic to workloads implementing that part of the application/API.\n\nA component can be either a web or a TCP/UDP component –- indicated by the component type. Web components are used to\nconfigure NGINX functionality associated with HTTP/HTTPS protocols and inherit web and common settings from linked Gateways.\nTCP/UDP components are used to configure NGINX functionality associated with TCP/UDP protocols\nand inherit TCP/UDP and common settings from linked Gateways.\n", + "type": "object", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/ComponentDesiredState" + }, + "currentStatus": { + "$ref": "#/components/schemas/ComponentCurrentStatus" + } + } + }, + "ComponentName": { + "type": "string" + }, + "ComponentList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Component" + } + } + } + }, + "WorkloadGroupSnippet": { + "description": "The NGINX directives to apply to the upstream blocks generated by the workload groups\nin the backend. Directives are applied to all workload groups by default. 
Use\napplicableWorkloadGroups to apply directives only to specific workload groups.\n", + "allOf": [ + { + "type": "object", + "properties": { + "applicableWorkloadGroups": { + "type": "array", + "description": "Workload group name.", + "items": { + "type": "string" + } + } + } + }, + { + "$ref": "#/components/schemas/NginxConfig" + } + ] + }, + "ComponentConfigSnippets": { + "type": "object", + "description": "The NGINX config snippets to insert into the server and location blocks\ngenerated by the component URIs.\n\n**Caution**: When you use snippets to customize your NGINX configuration, your changes are\napplied to the nginx.conf file *as is*. NGINX Controller does not verify that your configuration\nis valid before applying the snippet.\n\nWe strongly recommend verifying snippets in a lab environment before making any changes\nin production.\n", + "properties": { + "uriSnippets": { + "description": "An array of URI snippets.\n", + "type": "array", + "items": { + "$ref": "#/components/schemas/UriSnippet" + } + }, + "workloadGroupSnippets": { + "description": "An array of workload group snippets.\n", + "type": "array", + "items": { + "$ref": "#/components/schemas/WorkloadGroupSnippet" + } + } + } + }, + "ComponentStateCommon": { + "type": "object", + "description": "Settings common to Web & TCP/UDP Components.", + "properties": { + "configSnippets": { + "$ref": "#/components/schemas/ComponentConfigSnippets" + } + } + }, + "ComponentCurrentStatus": { + "description": "The current snapshot of the component settings that are reflected in the configuration on NGINX instances associated with\nthe Gateways that this Component references.\n\nThese settings should converge to those in Desired State as the new configuration is applied unless there are issues;\nthe State setting gives a summary of how the convergence is proceeding.\n", + "oneOf": [ + { + "$ref": "#/components/schemas/ComponentWebCurrentStatus" + }, + { + "$ref": 
"#/components/schemas/ComponentTcpUdpCurrentStatus" + } + ] + }, + "ComponentWebCurrentStatus": { + "description": "The current snapshot of the web component settings that are reflected in the configuration on NGINX instances associated with\nthe Gateways that this component references.\n\nThese settings should converge to those in Desired State as the new configuration is applied unless there are issues;\nthe State setting gives a summary of how the convergence is proceeding.\n", + "allOf": [ + { + "$ref": "#/components/schemas/ComponentStateCommon" + }, + { + "type": "object", + "properties": { + "componentType": { + "description": "Defines what type of Component this is.", + "type": "string", + "enum": [ + "WEB" + ] + }, + "ingress": { + "$ref": "#/components/schemas/ComponentWebIngress" + }, + "backend": { + "$ref": "#/components/schemas/WebBackend" + }, + "programmability": { + "$ref": "#/components/schemas/Programmability" + }, + "logging": { + "$ref": "#/components/schemas/Logging" + }, + "security": { + "$ref": "#/components/schemas/Security" + }, + "errorSetRef": { + "$ref": "#/components/schemas/ResourceRef" + }, + "compression": { + "$ref": "#/components/schemas/Compression" + }, + "caching": { + "$ref": "#/components/schemas/Caching" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + } + } + } + ] + }, + "ComponentTcpUdpCurrentStatus": { + "description": "The current snapshot of the TCP/UDP component settings that are reflected in the configuration on NGINX instances associated with\nthe Gateways that this component references.\n\nThese settings should converge to those in Desired State as the new configuration is applied unless there are issues;\nthe State setting gives a summary of how the convergence is proceeding.\n", + "allOf": [ + { + "$ref": "#/components/schemas/ComponentStateCommon" + }, + { + "type": "object", + "properties": { + "componentType": { + "description": "Defines what type of Component this is.", + "type": "string", + 
"enum": [ + "TCPUDP" + ] + }, + "ingress": { + "$ref": "#/components/schemas/ComponentTcpUdpIngress" + }, + "backend": { + "$ref": "#/components/schemas/TcpUdpBackend" + }, + "logging": { + "$ref": "#/components/schemas/Logging" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + } + } + } + ] + }, + "GatewayName": { + "type": "string" + }, + "GatewayList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "GatewayCurrentStatus": { + "description": "The current snapshot of the gateway settings that are reflected in the configuration on NGINX instances referenced by this Gateway.\nThese settings should converge to those in Desired State as the new configuration is applied unless there are issues;\nthe State setting gives a summary of how the convergence is proceeding.\n", + "allOf": [ + { + "$ref": "#/components/schemas/GatewayWebState" + }, + { + "$ref": "#/components/schemas/GatewayStateCommon" + }, + { + "type": "object", + "properties": { + "ingress": { + "$ref": "#/components/schemas/GatewayIngress" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + } + } + } + ] + }, + "Gateway": { + "type": "object", + "description": "A Gateway represents the initial network entry point of application and/or API traffic into an NGINX instance that is\nin the data path of this traffic. Gateway settings are combined with Component settings that reference the Gateway;\nthe resulting composite config is sent to the NGINX instances that the Gateway references.\n\nA Gateway can be referenced by either web components and/or TCP/UDP components. Web-only settings in the Gateway (for example, web URIs)\napply only to web components. 
TCP/UDP-only settings in the Gateway (for example, TCP/UDP URIs) apply only to TCP/UDP components.\nWeb and TCP/UDP common settings in the Gateway (for example, global TLS, socket) apply to both web and TCP/UDP components.\n\nExamples:\nGateway with web and TCP/UDP URIs, web-specific settings, common settings. Web URIs and web settings apply to web components.\nCommon settings apply to both component types. TCP/UDP URIs apply only to TCP/UDP components –- presently, there are no other TCP/UDP only settings.\n\nGateway web URIs (for example, https://www.xyz.com) combined with web component URIs (for example, /blog) define web URI config (https://www.xyz.com/blog).\nComponent TCP/UDP URIs define TCP/UDP URI config (for example, tcp+tls://192.168.1.1:200); Gateway TCP/UDP URIs can provide TLS info plus restrict\nwhich TCP/UDP URIs can be in the component. Alternatively, component URIs fully define the URI config if the Gateway has no URIs.\n\nIf only a single URI type is in the Gateway, the URI config for that type is determined by combining the Gateway URIs with\nthe URIs from components of that type; only component URIs are used for the URI config for the other type.\n\nFor HTTPS URIs, global Gateway TLS settings are used when more specific TLS info is not present in the Gateway URIs,\nor for component URIs that have an HTTPS protocol and hostnames with no specific URI or component global TLS settings defined.\n\nFor tcp+tls URIs, Gateway TLS settings are used when TLS info is not defined in a component URI or component global TLS.\nA Gateway URI’s TLS info is used if it encompasses the component URI. For example, tcp+tls://192.168.1.5:100-104 in the Gateway and\ncomponent URI of tcp+tls://192.168.1.5:100. 
Global Gateway TLS is used if no other TLS settings apply.\n", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/GatewayDesiredState" + }, + "currentStatus": { + "$ref": "#/components/schemas/GatewayCurrentStatus" + } + } + }, + "GZip": { + "properties": { + "isEnabled": { + "type": "boolean", + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip" + } + }, + "buffers": { + "type": "object", + "properties": { + "number": { + "type": "integer" + }, + "size": { + "type": "string", + "pattern": "^[0-9]+[k|K|m|M]{1}$" + } + }, + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_buffers" + } + }, + "level": { + "type": "integer", + "minimum": 1, + "maximum": 9, + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_comp_level" + } + }, + "disabledUserAgents": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "uniqueItems": true, + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_disable" + } + }, + "httpVersion": { + "type": "string", + "pattern": "^[1-3]{1}\\.[0-1]{1}$", + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_http_version" + } + }, + "minLength": { + "type": "integer", + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_min_length" + } + }, + "proxied": { + "type": "string", + "enum": [ + "DISABLED", + "EXPIRED", + "NOCACHE", + "NOSTORE", + "PRIVATE", + "NOLASTMODIFIED", + "NOETAG", + "AUTH", + "ANY" + ], + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_proxied" + } + }, + "mimeTypes": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "uniqueItems": true, + "externalDocs": { + "url": 
"http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_types" + } + }, + "vary": { + "type": "string", + "enum": [ + "DISABLED", + "ENABLED" + ], + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_vary" + } + }, + "static": { + "type": "string", + "enum": [ + "DISABLED", + "ENABLED", + "ALWAYS" + ], + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_static_module.html#gzip_static" + } + } + } + }, + "Compression": { + "description": "Gzip compression settings.", + "type": "object", + "properties": { + "gzip": { + "$ref": "#/components/schemas/GZip" + } + } + }, + "GatewayWebState": { + "description": "Non-ingress settings in a Gateway that apply only to Web Components.", + "type": "object", + "properties": { + "errorSetRef": { + "$ref": "#/components/schemas/ResourceRef" + }, + "compression": { + "$ref": "#/components/schemas/Compression" + } + } + }, + "NginxDirective": { + "required": [ + "directive" + ], + "properties": { + "directive": { + "type": "string", + "description": "The name of the NGINX directive. 
For a list of NGINX Directives, refer to [the NGINX documentation](http://nginx.org/en/docs/dirindex.html).\n" + }, + "args": { + "type": "array", + "description": "Directive arguments.", + "items": { + "type": "string" + } + }, + "block": { + "type": "array", + "description": "The directives to include within a block directive or context.", + "items": { + "$ref": "#/components/schemas/NginxDirective" + } + } + } + }, + "NginxConfig": { + "type": "object", + "required": [ + "directives" + ], + "properties": { + "directives": { + "type": "array", + "description": "List of NGINX directives to add to the configuration.\n", + "items": { + "$ref": "#/components/schemas/NginxDirective" + } + } + } + }, + "URIMatchMethod": { + "description": "Specifies how to determine a match between an incoming Web URI and configured Web URI.", + "type": "string", + "enum": [ + "PREFIX", + "REGEX", + "REGEX_CASE_SENSITIVE", + "SUFFIX", + "EXACT" + ], + "default": "PREFIX" + }, + "ApplicableUri": { + "type": "object", + "required": [ + "uri" + ], + "properties": { + "uri": { + "type": "string" + }, + "matchMethod": { + "$ref": "#/components/schemas/URIMatchMethod" + } + } + }, + "ApplicableUris": { + "type": "array", + "description": "Defines an array of ingress URIs with a corresponding matchMethod that this rule/snippet applies to.\nBy default, a rule/snippet applies to all ingress URIs in the gateway/component. A snippet can be\napplied only to URIs of the gateway/component that it is in. A snippet in a component cannot be\napplied to the gateway URI. However, a rule can be applied at a gateway level by specifying ingress\nURIs from the gateway, for example, \"http://www.nginx.com\". Note that applying the rule to URIs at\na gateway level can affect other components. The URI and the matchMethod must match an ingress URI defined\neither at the component or gateway level. 
If no match is found, the request is rejected.\n", + "items": { + "$ref": "#/components/schemas/ApplicableUri" + } + }, + "UriSnippet": { + "description": "The NGINX directives to apply to the server and location blocks generated by the URIs.\nDirectives are applied to all URIs by default. Use applicableURIs to apply directives\nonly to specific URIs.\n", + "allOf": [ + { + "type": "object", + "properties": { + "applicableURIs": { + "$ref": "#/components/schemas/ApplicableUris" + } + } + }, + { + "$ref": "#/components/schemas/NginxConfig" + } + ] + }, + "DiskStore": { + "type": "object", + "required": [ + "path", + "maxSize", + "minFree", + "inMemoryStoreSize" + ], + "properties": { + "path": { + "description": "Defines what the path is for this storage", + "example": "/tmp/hdd1", + "type": "string", + "minLength": 1 + }, + "isDefault": { + "description": "When more than one disk stores are specified, the data stored in the cache is split by percentage or string.\n`isDefault` specifies a default storage for cache data that does not meet the percent or string criteria.\n", + "type": "boolean", + "default": false + }, + "percentCriteria": { + "description": "Defines the percentage that should be allocated to this store. This must be specified when `criteriaType` is PERCENTAGE.", + "type": "string", + "example": "75%", + "pattern": "\\d+(?:\\.\\d+)?%" + }, + "stringCriteria": { + "description": "Defines string pattern of data that should go in to this store. 
This must be specified when `criteriaType` is STRING", + "type": "array", + "items": { + "description": "Depending on the 'SplitConfig' \"Key\", string could have different patterns.\nLike ~.mp3$ for REGEX match or any string like 'hostname.*' or IP address\n", + "type": "string", + "minLength": 1, + "example": "[\"~.mp3$\", \"~*.avi\"],[\"10.1.1.1\", \"10.1.1.2\"]" + } + }, + "maxSize": { + "type": "string", + "description": "Defines the maximum size that the cache can grow to", + "example": "5G", + "pattern": "^[0-9]+[kKmMgG]?$" + }, + "minFree": { + "type": "string", + "description": "Defines the minimum amount of free disk space on the drive for the cache. If the free disk space drops below the minimum value, the cache is trimmed.", + "example": "10k", + "pattern": "^[0-9]+[kKmMgG]?$" + }, + "inMemoryStoreSize": { + "type": "string", + "description": "Defines the size of the memory zone where information about the data is stored.", + "example": "500m", + "pattern": "^[0-9]+[kKmM]?$" + }, + "trimPolicy": { + "description": "Defines how we configure `manager_files` for the proxy_cache_path\nCache management is performed in iterations. During one iteration no more than `maxFiles` items are deleted\nThis policy defines how many files are deleted, how often and how long the process lasts.\n", + "type": "object", + "properties": { + "maxFiles": { + "type": "integer", + "description": "Defines how many files are deleted in one iteration. NGINX defaults this to 100", + "minimum": 1 + }, + "frequency": { + "type": "string", + "description": "Defines the length of the pause between cache management iterations. The default for NGINX is 50 milliseconds.", + "example": "2s", + "pattern": "^[0-9]+[h|hm|hms|ms|m|s]{1,2}$" + }, + "durationThreshold": { + "type": "string", + "description": "Defines the duration for one cache management iteration. 
The default for NGINX is 200 milliseconds.", + "example": "2s", + "pattern": "^[0-9]+[h|hm|hms|ms|m|s]{1,2}$" + } + } + }, + "loaderPolicy": { + "description": "Defines how `loader_files` are configured for the `proxy_cache_path`.\nloading is done in iterations. During one iteration no more than maxFiles items are loaded\nDefines how previosly cached data is loadded in the cache `Zone`.\n", + "type": "object", + "properties": { + "maxFiles": { + "type": "integer", + "description": "During one iteration no more than `files` items are loaded. NGINX defaults this to 100", + "minimum": 1 + }, + "frequency": { + "type": "string", + "description": "Defines the length of the pause between cache management iterations. For NGINX, the default is 50 milliseconds.", + "example": "2s", + "pattern": "^[0-9]+[h|hm|hms|ms|m|s]{1,2}$" + }, + "durationThreshold": { + "type": "string", + "description": "Defines the maximum duration for one cache management iteration. For NGINX, the default is 200 milliseconds.", + "example": "2s", + "pattern": "^[0-9]+[h|hm|hms|ms|m|s]{1,2}$" + } + } + }, + "purgerPolicy": { + "description": "Defines how we configure `purger` for the proxy_cache_path\nPurger is done in iterations. During one iteration no more than maxFiles items are scanned\nSpecifies whether cache entires matching a wildcard key are removed from the disk by the cache purger.\n", + "type": "object", + "properties": { + "maxFiles": { + "type": "integer", + "description": "Defines the number of items to scan during one iteration. For NGINX, default is 10 items.", + "minimum": 1 + }, + "frequency": { + "type": "string", + "description": "Defines the length of the pause between cache purge iterations. For NGINX, the default is 50 milliseconds.", + "example": "10ms", + "pattern": "^[0-9]+[h|hm|hms|ms|m|s]{1,2}$" + }, + "durationThreshold": { + "type": "string", + "description": "Defines the maximum duration for one cache purge iteration. 
For NGINX, the default is 50 milliseconds.", + "example": "10ms", + "pattern": "^[0-9]+[h|hm|hms|ms|m|s]{1,2}$" + } + } + }, + "directoryLevel": { + "type": "object", + "description": "Defines the level of the directory structure that NGINX creates for the cache.\nDefines the cache hierarchy levels from 1 to 3, with each level accepting a value of `1` or `2`.\nFor example, `levels=1:2` file names in a cache will look like this. (**Note**: `/c/29` & `29c` at end)\n/data/nginx/cache/c/29/b7f54b2df7773722d382f4809d65029c\n", + "properties": { + "first": { + "type": "integer", + "minimum": 1, + "maximum": 2 + }, + "mid": { + "type": "integer", + "minimum": 1, + "maximum": 2 + }, + "last": { + "type": "integer", + "minimum": 1, + "maximum": 2 + } + } + }, + "tempPath": { + "description": "Determines whether you'll use a temporary path for the cache disk store.\nYou can define the path location by using `proxy_temp_path` in a `configSnippet` for the Component.\n", + "type": "string", + "enum": [ + "ENABLED", + "DISABLED" + ] + }, + "inactiveTime": { + "description": "Defines the length of time to wait before removing cached data that haven't been accessed.", + "example": "5s", + "type": "string", + "pattern": "^[0-9]+[h|hm|hms|ms|m|s]{1,2}$" + } + } + }, + "Caching": { + "description": "Cache settings for components applicable to web client requests.", + "type": "object", + "required": [ + "diskStores" + ], + "properties": { + "splitConfig": { + "description": "Allows the cache to be split among multiple storage devices", + "type": "object", + "required": [ + "criteriaType" + ], + "properties": { + "key": { + "type": "string", + "description": "Defines the variable on which the cache is split across different storage devices.", + "example": "${remote_addr}${http_user_agent}${date_gmt}", + "minLength": 1 + }, + "criteriaType": { + "type": "string", + "description": "Defines the criteria on which the cache is split across different storage devices.", + "enum": [ + 
"PERCENTAGE", + "STRING" + ], + "default": "PERCENTAGE" + } + } + }, + "diskStores": { + "description": "Defines an array of disk stores. If more than one is specified, the cache contents are split between the stores using splitConfig settings.\nIf criteriaType is PERCENTAGE, then percentCriteria should be specified to control how much goes in to a particular store.\nIf the criteriaType is STRING, then stringCriteria should be specified to control the split based on a pattern match.\n", + "type": "array", + "items": { + "$ref": "#/components/schemas/DiskStore" + } + } + } + }, + "ComponentWebDesiredState": { + "description": "The desired settings in the Web Component that the user wants in the configuration on NGINX instances associated with\nthe Gateways which this component references.\n", + "allOf": [ + { + "$ref": "#/components/schemas/ComponentStateCommon" + }, + { + "type": "object", + "required": [ + "ingress", + "backend" + ], + "properties": { + "componentType": { + "description": "Defines the Component type. 
The default type is Web.", + "type": "string", + "enum": [ + "WEB" + ] + }, + "ingress": { + "$ref": "#/components/schemas/ComponentWebIngress" + }, + "backend": { + "$ref": "#/components/schemas/WebBackend" + }, + "programmability": { + "$ref": "#/components/schemas/Programmability" + }, + "logging": { + "$ref": "#/components/schemas/Logging" + }, + "security": { + "$ref": "#/components/schemas/Security" + }, + "errorSetRef": { + "$ref": "#/components/schemas/ResourceRef" + }, + "compression": { + "$ref": "#/components/schemas/Compression" + }, + "caching": { + "$ref": "#/components/schemas/Caching" + } + } + } + ] + }, + "ComponentTcpUdpIngress": { + "description": "Ingress settings in a TCP/UDP Component.", + "allOf": [ + { + "$ref": "#/components/schemas/ComponentIngressCommon" + } + ] + }, + "TcpUdpMonitoring": { + "description": "Health monitor settings across all workload groups in a TCP/UDP Component.", + "allOf": [ + { + "$ref": "#/components/schemas/MonitoringCommon" + }, + { + "type": "object", + "properties": { + "send": { + "description": "Probe request for a TCP/UDP monitor.", + "type": "string" + }, + "response": { + "description": "Case-sensitive regular expression for the expected success response to a TCP/UDP monitor.", + "type": "string" + } + } + } + ] + }, + "TcpUdpProxy": { + "description": "Proxy retry and timeout settings applicable to servers in a TcpUdp workloadGroup associated with a Component.", + "type": "object", + "properties": { + "nextUpstream": { + "description": "When a connection to the proxied server cannot be established, determines whether a client connection will be passed to the next server.", + "type": "string", + "enum": [ + "ON", + "OFF" + ], + "default": "OFF" + }, + "connectTimeout": { + "description": "Defines a timeout for establishing a connection with the proxied server.", + "type": "string", + "pattern": "^[0-9]{1,12}[hms]?$" + }, + "proxyTimeout": { + "description": "Sets the timeout between two successive read or 
write operations on client or proxied server connections.", + "type": "string", + "pattern": "^[0-9]{1,12}[hms]?$" + } + } + }, + "TcpUdpWorkloadGroup": { + "description": "Group of servers hosting a part of a TCP/UDP application represented by a Component.", + "allOf": [ + { + "$ref": "#/components/schemas/WorkloadGroupCommon" + }, + { + "type": "object", + "properties": { + "proxy": { + "$ref": "#/components/schemas/TcpUdpProxy" + }, + "useServerPort": { + "$ref": "#/components/schemas/ServiceConfigState" + }, + "uris": { + "type": "object", + "description": "The URI of a server that hosts a TCP/UDP application.\n\nThe URI must conform to the format `schema://address:port`.\n\n- The schema must be one of the following: `tcp`, `udp`, or `tcp+tls`.\n- The address value can be an IP address or a host name.\n- All three elements -- schema, address, and port -- are required.\n\nNote: When `useServerPort` is `ENABLED`, then you should define a single URI that uses the `schema://address` format.\nDefining a `port` value for the URI when also using `useServerPort` will result in an error.\n\nFor example:\n\n- `tcp://192.0.2.247:8443`\n- `tcp+tls://192.0.2.247:8449`\n- `udp://www.f5workload.com:989`\n", + "additionalProperties": { + "$ref": "#/components/schemas/WorkloadUri" + } + } + } + } + ] + }, + "TcpUdpBackend": { + "description": "Backend settings in a TCP/UDP Component.\n", + "type": "object", + "properties": { + "monitoring": { + "$ref": "#/components/schemas/TcpUdpMonitoring" + }, + "workloadGroups": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/TcpUdpWorkloadGroup" + } + } + } + }, + "ComponentTcpUdpDesiredState": { + "description": "The desired settings in the TCP/UDP Component to use in the configuration on NGINX instances associated with the Gateways that this Component references.", + "allOf": [ + { + "$ref": "#/components/schemas/ComponentStateCommon" + }, + { + "type": "object", + "required": [ + "componentType", + 
"ingress", + "backend" + ], + "properties": { + "componentType": { + "description": "Defines what type of Component this is. The type must be TCPUDP.", + "type": "string", + "enum": [ + "TCPUDP" + ] + }, + "ingress": { + "$ref": "#/components/schemas/ComponentTcpUdpIngress" + }, + "backend": { + "$ref": "#/components/schemas/TcpUdpBackend" + }, + "logging": { + "$ref": "#/components/schemas/Logging" + } + } + } + ] + }, + "ComponentDesiredState": { + "description": "The desired component settings to use in the NGINX instance configuration that's associated with\nthe Gateways that this component references.\n", + "oneOf": [ + { + "$ref": "#/components/schemas/ComponentWebDesiredState" + }, + { + "$ref": "#/components/schemas/ComponentTcpUdpDesiredState" + } + ] + }, + "GatewayConfigSnippets": { + "type": "object", + "description": "The NGINX config snippets to insert into the NGINX config generated by the gateway.\nNote that the default directives and directives generated by the native parts of the\nAPI(API sections other than configSnippets) cannot be overriden.\nmainSnippet - Insert directives in the main context.\nhttpSnippet - Insert directives in the http context.\nstreamSnippet - Insert directives in the stream context.\nuriSnippet - Insert into server blocks generated by the gateway URIs.\n This applies only to HTTP URIs, since TCP and UDP URIs do not\n generate server blocks.\n\n**Caution**: When you use snippets to customize your NGINX configuration, your changes are\napplied to the nginx.conf file *as is*. 
NGINX Controller does not verify that your configuration\nis valid before applying the snippet.\n\nWe strongly recommend verifying snippets in a lab environment before making any changes\nin production.\n", + "properties": { + "mainSnippet": { + "$ref": "#/components/schemas/NginxConfig" + }, + "httpSnippet": { + "$ref": "#/components/schemas/NginxConfig" + }, + "streamSnippet": { + "$ref": "#/components/schemas/NginxConfig" + }, + "uriSnippets": { + "description": "An array of URI snippets.\n", + "type": "array", + "items": { + "$ref": "#/components/schemas/UriSnippet" + } + } + } + }, + "HA": { + "description": "Data path high availability settings", + "type": "object", + "properties": { + "isEnabled": { + "type": "boolean", + "description": "Enables or disables HA.\n\nWhen set to `true`, supports configuring instances in an active‑passive, high‑availability (HA) setup.\n\nTo configure the gateway on HA mode:\n - `keepalived` must be installed and configured on the desired instances.\n - At least one listen IP address must be specified in the `listenIps` section within `InstanceRefs`\n\n See the `listenIps` section for additional requirements for configuring HA.\n" + } + } + }, + "GatewayStateCommon": { + "description": "Non-ingress settings in a Gateway that apply to Web and TCP/UDP Components.", + "type": "object", + "properties": { + "configSnippets": { + "$ref": "#/components/schemas/GatewayConfigSnippets" + }, + "ha": { + "$ref": "#/components/schemas/HA" + } + } + }, + "ServiceConfigState": { + "type": "string", + "enum": [ + "ENABLED", + "DISABLED" + ], + "default": "DISABLED" + }, + "GatewaySocketSettings": { + "description": "Socket settings in a Gateway.", + "type": "object", + "properties": { + "setFib": { + "x-f5-experimental": true, + "type": "integer" + }, + "fastOpen": { + "x-f5-experimental": true, + "type": "integer" + }, + "acceptFilter": { + "x-f5-experimental": true, + "type": "string", + "enum": [ + "DATA_READY", + "HTTP_READY" + ] + }, + 
"deferred": { + "x-f5-experimental": true, + "$ref": "#/components/schemas/ServiceConfigState" + }, + "backlog": { + "x-f5-experimental": true, + "type": "integer", + "default": -1 + }, + "isIpv6Only": { + "x-f5-experimental": true, + "type": "boolean", + "default": false + }, + "reusePort": { + "x-f5-experimental": true, + "$ref": "#/components/schemas/ServiceConfigState" + }, + "tcpKeepAlive": { + "type": "object", + "properties": { + "state": { + "$ref": "#/components/schemas/ServiceConfigState" + }, + "idle": { + "type": "string", + "pattern": "^[0-9]+[h|m|s]{1}$" + }, + "interval": { + "type": "string", + "pattern": "^[0-9]+[h|m|s]{1}$" + }, + "count": { + "type": "integer" + } + } + }, + "receiveBufferSize": { + "type": "string", + "pattern": "^[0-9]+[k|K|m|M]{1}$" + }, + "sendBufferSize": { + "type": "string", + "pattern": "^[0-9]+[k|K|m|M]{1}$" + } + } + }, + "TLS": { + "description": "TLS settings applicable to URIs.", + "type": "object", + "required": [ + "certRef" + ], + "properties": { + "certRef": { + "$ref": "#/components/schemas/ResourceRef" + }, + "protocols": { + "type": "array", + "items": { + "type": "string", + "pattern": "TLSv1|TLSv1\\.[1-3]|SSLv2|SSLv3" + } + }, + "cipher": { + "type": "string", + "example": "ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP;" + }, + "preferServerCipher": { + "$ref": "#/components/schemas/ServiceConfigState" + }, + "sessionCache": { + "type": "string", + "enum": [ + "OFF", + "NONE", + "BUILTIN", + "SHARED" + ], + "default": "OFF" + } + } + }, + "IngressUri": { + "type": "object", + "properties": { + "matchMethod": { + "$ref": "#/components/schemas/URIMatchMethod" + }, + "tls": { + "$ref": "#/components/schemas/TLS" + }, + "serverPoolPort": { + "description": "The port number used when the IngressUri is deployed to an Instance that's a member of a BIG-IP server pool.", + "type": "integer" + } + } + }, + "IngressUris": { + "type": "object", + "description": "Defines the URI in Gateways and Components. 
The URI has different requirements depending on where it is used.\n\nFor Web URIs in Gateways, `uris` must be a complete URI that follows the format `://host[:port]`;\nadditionally each URI can have a match method specified and an HTTPS URI can include TLS information.\n Examples:\n - `http://www.f5.com`\n - `https://www.f5.com`\n - `http://www.f5.com:8080`\n - `http://*.info.f5.com`\n\nFor Web URIs in Components, `uris` can be either a complete URI that follows the format `://host[:port][/path]`\nor a relative path that follows the format `/path[/...]`;\nadditionally each URI can have a match method specified and an HTTPS URI can include TLS information.\n Examples:\n - `/images`\n - `/*.jpg`\n - `/locations/us/wa*`\n - `http://www.f5.com:8080/sales`\n - `http://*.f5.com:5050/testing`\n\nFor TCP/UDP URIs in Gateways and Components,`uris` must be a complete URI that follows the format `://<*|IP>:`;\nadditionally a tcp+tls URI can include TLS information. Match method is not applicable to TCP/UDP URIs.\n Examples:\n - `tcp://192.168.1.1:12345`\n - `tcp+tls://192.168.1.1:12346`\n - `tcp://192.168.1.1:12345-12350`\n - `tcp://*:12345`\n - `udp://192.168.1.1:12345`\n - `udp://*:12345`\n\nIn a TCP/UDP Component, URIs can either all have a protocol of udp or a mix of TCP and tcp+tls.\n", + "additionalProperties": { + "description": "Provide the URI associated with the resource.", + "$ref": "#/components/schemas/IngressUri" + } + }, + "ComponentIngressCommon": { + "description": "Ingress settings common to Web and TCP/UDP components.", + "type": "object", + "required": [ + "uris" + ], + "properties": { + "uris": { + "$ref": "#/components/schemas/IngressUris" + }, + "gatewayRefs": { + "description": "Reference(s) to existing Gateway resource(s).", + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "tls": { + "$ref": "#/components/schemas/TLS" + } + } + }, + "ComponentWebIngressClient": { + "description": "Non-buffer settings in a 
Component applicable to Web client requests.", + "type": "object", + "properties": { + "bodyInFileOnly": { + "description": "Determines whether NGINX Controller should save the entire client request body into a file.", + "type": "string", + "enum": [ + "ENABLED", + "DISABLED", + "CLEAN" + ], + "default": "DISABLED", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_in_file_only" + } + }, + "bodyTimeout": { + "description": "Defines a timeout for reading the client request body.", + "type": "string", + "pattern": "^[0-9]{1,12}[hms]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_timeout" + } + }, + "maxBodySize": { + "description": "Sets the maximum allowed size of the client request body, specified in the “Content-Length” request header field.\n\nDisables checking of client request body size when set to 0.\n", + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size" + } + } + } + }, + "ComponentWebIngress": { + "description": "Ingress settings in a Web Component.", + "allOf": [ + { + "$ref": "#/components/schemas/ComponentIngressCommon" + }, + { + "$ref": "#/components/schemas/WebIngressCommon" + }, + { + "type": "object", + "properties": { + "client": { + "$ref": "#/components/schemas/ComponentWebIngressClient" + }, + "buffers": { + "$ref": "#/components/schemas/WebIngressBuffersCommon" + } + } + } + ] + }, + "MonitoringCommon": { + "description": "Monitor settings common to Web and TCP/UDP.", + "type": "object", + "properties": { + "defaultState": { + "type": "string", + "enum": [ + "HEALTHY", + "UNHEALTHY" + ], + "default": "HEALTHY" + }, + "interval": { + "type": "integer", + "minimum": 1, + "default": 5 + }, + "consecutiveSuccessThreshold": { + "type": "integer", + "minimum": 1, + "default": 1 + }, + "consecutiveFailureThreshold": { + "type": 
"integer", + "minimum": 1, + "default": 1 + }, + "port": { + "type": "integer", + "minimum": 1, + "maximum": 65535 + }, + "healthEvents": { + "description": "When enabled, the NGINX Controller Agent generates events related to the\nhealth of the workload group members. Two types of events are generated:\n- An event when the health status of a workload group member changes.\n- A periodic event per component with the health status of all workload groups.\n", + "type": "string", + "enum": [ + "ENABLED", + "DISABLED" + ], + "default": "DISABLED" + } + } + }, + "MonitorAddHeader": { + "type": "object", + "required": [ + "action", + "headerName", + "headerValue" + ], + "properties": { + "action": { + "type": "string", + "description": "Adds monitor header details.", + "enum": [ + "ADD" + ] + }, + "headerName": { + "type": "string", + "description": "The name of the monitor header to modify.", + "minLength": 1 + }, + "headerValue": { + "type": "string", + "description": "The value to apply to the monitor header.", + "minLength": 1 + } + } + }, + "MonitorDeleteHeader": { + "type": "object", + "required": [ + "action", + "headerName" + ], + "properties": { + "action": { + "type": "string", + "description": "Deletes monitor header details.", + "enum": [ + "DELETE" + ] + }, + "headerName": { + "type": "string", + "description": "The name of the monitor header to modify.", + "minLength": 1 + } + } + }, + "MonitorHeaderModification": { + "type": "object", + "discriminator": { + "propertyName": "action", + "mapping": { + "ADD": "#/components/schemas/MonitorAddHeader", + "DELETE": "#/components/schemas/MonitorDeleteHeader" + } + }, + "oneOf": [ + { + "$ref": "#/components/schemas/MonitorAddHeader" + }, + { + "$ref": "#/components/schemas/MonitorDeleteHeader" + } + ] + }, + "MonitorResponseStatus": { + "type": "object", + "properties": { + "range": { + "type": "object", + "properties": { + "startCode": { + "type": "integer", + "minimum": 100, + "maximum": 599 + }, + "endCode": { + 
"type": "integer", + "minimum": 100, + "maximum": 599 + } + } + }, + "codes": { + "type": "array", + "items": { + "type": "integer", + "minimum": 100, + "maximum": 599 + } + }, + "match": { + "type": "boolean", + "default": true + } + } + }, + "MonitorResponseContent": { + "type": "object", + "properties": { + "content": { + "type": "string" + }, + "match": { + "type": "boolean", + "default": true + } + } + }, + "WebMonitorResponse": { + "description": "Settings that define successful responses to a Web monitor.", + "type": "object", + "properties": { + "status": { + "$ref": "#/components/schemas/MonitorResponseStatus" + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/MonitorResponseContent" + } + }, + "body": { + "$ref": "#/components/schemas/MonitorResponseContent" + } + } + }, + "WebMonitoring": { + "description": "Health monitor settings across all workload groups in a Web Component.", + "allOf": [ + { + "$ref": "#/components/schemas/MonitoringCommon" + }, + { + "type": "object", + "properties": { + "uri": { + "description": "URI containing the relative path that the monitor probe is sent to; the host is specified in the URI in the workload group.", + "type": "string", + "default": "/" + }, + "headerModifications": { + "type": "array", + "items": { + "$ref": "#/components/schemas/MonitorHeaderModification" + } + }, + "response": { + "description": "Settings that define successful responses to a Web monitor.", + "$ref": "#/components/schemas/WebMonitorResponse" + } + } + } + ] + }, + "RoundRobinLB": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "ROUND_ROBIN" + ] + } + } + }, + "IPHashLB": { + "type": "object", + "description": "IP Hash Load Balancing only applicable to Web Components.", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "IPHASH" + ] + } + } + }, + "LeastConnLB": { + "type": "object", + 
"required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "LEAST_CONNECTIONS" + ] + } + } + }, + "HashLBMethod": { + "type": "object", + "required": [ + "type", + "userKey" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "HASH" + ] + }, + "userKey": { + "type": "string" + }, + "consistentHash": { + "$ref": "#/components/schemas/ServiceConfigState" + } + } + }, + "LeastTimeLBMethod": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "LEAST_TIME" + ] + }, + "latencyParameter": { + "type": "string", + "description": "Values applicable to a Web Component are: [HEADER, LAST_BYTE, LAST_BYTE_INFLIGHT];\nValues applicable to a TCP/UDP Component are: [CONNECT, FIRST_BYTE, LAST_BYTE, LAST_BYTE_INFLIGHT].\nThe default value is used for a web Component; there is no default for a TCP/UDP Component.\n", + "enum": [ + "HEADER", + "CONNECT", + "FIRST_BYTE", + "LAST_BYTE", + "LAST_BYTE_INFLIGHT" + ], + "default": "HEADER" + } + } + }, + "RandomLBMethod": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "RANDOM" + ] + }, + "twoServerLBMethod": { + "type": "string", + "description": "Values applicable to a Web Component are: [LEAST_CONNECTIONS, LEAST_TIME_HEADER, LEAST_TIME_LAST_BYTE];\nValues applicable to a TCP/UDP Component are: [LEAST_CONNECTIONS, LEAST_TIME_CONNECT, LEAST_TIME_FIRST_BYTE, LEAST_TIME_LAST_BYTE].\n", + "enum": [ + "LEAST_CONNECTIONS", + "LEAST_TIME_HEADER", + "LEAST_TIME_CONNECT", + "LEAST_TIME_FIRST_BYTE", + "LEAST_TIME_LAST_BYTE" + ] + } + } + }, + "LoadBalancingMethod": { + "discriminator": { + "propertyName": "type", + "mapping": { + "ROUND_ROBIN": "#/components/schemas/RoundRobinLB", + "IPHASH": "#/components/schemas/IPHashLB", + "LEAST_CONNECTIONS": "#/components/schemas/LeastConnLB", + "HASH": "#/components/schemas/HashLBMethod", + "LEAST_TIME": 
"#/components/schemas/LeastTimeLBMethod", + "RANDOM": "#/components/schemas/RandomLBMethod" + } + }, + "oneOf": [ + { + "$ref": "#/components/schemas/RoundRobinLB" + }, + { + "$ref": "#/components/schemas/IPHashLB" + }, + { + "$ref": "#/components/schemas/LeastConnLB" + }, + { + "$ref": "#/components/schemas/HashLBMethod" + }, + { + "$ref": "#/components/schemas/LeastTimeLBMethod" + }, + { + "$ref": "#/components/schemas/RandomLBMethod" + } + ] + }, + "DNSServiceDiscovery": { + "required": [ + "servers" + ], + "properties": { + "servers": { + "type": "array", + "description": "Array of DNS servers. Possible options are:\n- An IPv4 address with an optional port number.\n Port 53 is used if not specified.\n For example, \"10.1.1.1\", \"10.1.1.1:5353\".\n- An IPv6 address with an optional port number.\n Port 53 is used if not specified.\n For example, \"[2001::1]\", \"[2001::1]:5353\",\n- Fully qualified domain name (FQDN). ASCII characters only.\n NGINX uses the OS name server configuration\n to identify the IP addresses of the DNS servers to use.\n", + "items": { + "type": "string", + "pattern": 
"^(?:(?:(?:(?:25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\.){3}(?:25[0-5]|2[0-4]\\d|[01]?\\d\\d?)(?::(?:[1-9]|[1-5]?[0-9]{2,4}|6[1-4][0-9]{3}|65[1-4][0-9]{2}|655[1-2][0-9]|6553[1-5]))?$)|(?:\\[\\s*(?:(?:(?:[0-9a-fA-F]{1,4}:){7}(?:[0-9a-fA-F]{1,4}|:))|(?:(?:[0-9a-fA-F]{1,4}:){6}(?::[0-9a-fA-F]{1,4}|(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3})|:))|(?:(?:[0-9a-fA-F]{1,4}:){5}(?:(?:(?::[0-9a-fA-F]{1,4}){1,2})|:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3})|:))|(?:(?:[0-9a-fA-F]{1,4}:){4}(?:(?:(?::[0-9a-fA-F]{1,4}){1,3})|(?:(?::[0-9a-fA-F]{1,4})?:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(?:(?:[0-9a-fA-F]{1,4}:){3}(?:(?:(?::[0-9a-fA-F]{1,4}){1,4})|(?:(?::[0-9a-fA-F]{1,4}){0,2}:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(?:(?:[0-9a-fA-F]{1,4}:){2}(?:(?:(?::[0-9a-fA-F]{1,4}){1,5})|(?:(?::[0-9a-fA-F]{1,4}){0,3}:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(?:(?:[0-9a-fA-F]{1,4}:){1}(?:(?:(?::[0-9a-fA-F]{1,4}){1,6})|(?:(?::[0-9a-fA-F]{1,4}){0,4}:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(?::(?:(?:(?::[0-9a-fA-F]{1,4}){1,7})|(?:(?::[0-9a-fA-F]{1,4}){0,5}:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:)))(?:%.+)?](?::(?:[1-9]|[1-5]?[0-9]{2,4}|6[1-4][0-9]{3}|65[1-4][0-9]{2}|655[1-2][0-9]|6553[1-5]))?\\s*$)|(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[-0-9a-zA-Z]{0,61}[0-9a-zA-Z])?)*$))", + "minLength": 1, + "example": "10.1.1.1:5353" + }, + "minItems": 1, + "uniqueItems": true + }, + "ttl": { + "type": "string", + "pattern": "^[0-9]+[h|m|s]{1}$", + "description": "Overrides the TTL setting present in the DNS record.", + "example": "10s" + }, + "timeout": { + "type": "string", + "pattern": 
"^[0-9]+[h|m|s]{1}$", + "description": "Sets the timeout for domain name resolution.", + "example": "10s" + } + } + }, + "WorkloadGroupCommon": { + "description": "Settings common to Web and TCP/UDP workloadGroups.", + "type": "object", + "properties": { + "locationRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "loadBalancingMethod": { + "$ref": "#/components/schemas/LoadBalancingMethod" + }, + "dnsServiceDiscovery": { + "$ref": "#/components/schemas/DNSServiceDiscovery" + } + } + }, + "WebProxy": { + "description": "Proxy retry and timeout settings applicable to servers in a Web workloadGroup associated with a Component.", + "type": "object", + "properties": { + "nextUpstream": { + "description": "Specifies in which cases a request should be passed to the next server.", + "type": "array", + "items": { + "type": "string", + "enum": [ + "ERROR", + "TIMEOUT", + "INVALID_HEADER", + "HTTP_500", + "HTTP_502", + "HTTP_503", + "HTTP_504", + "HTTP_403", + "HTTP_404", + "HTTP_429", + "NON_IDEMPOTENT", + "OFF" + ] + } + }, + "connectTimeout": { + "description": "Defines a timeout for establishing a connection with a proxied server.", + "type": "string", + "pattern": "^[0-9]{1,12}[hms]?$" + }, + "sendTimeout": { + "description": "Sets a timeout for transmitting a request to the proxied server.", + "type": "string", + "pattern": "^[0-9]{1,12}[hms]?$" + } + } + }, + "SessionPersistenceCookie": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "COOKIE" + ] + }, + "srvID": { + "type": "string" + }, + "expireTime": { + "type": "string", + "pattern": "^[0-9]+[h|m|s]{1}$" + }, + "domain": { + "type": "string" + }, + "path": { + "type": "string" + } + } + }, + "SessionPersistenceRoute": { + "type": "object", + "required": [ + "type", + "routeInfoLocation" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "ROUTE" + ] + }, + "routeInfoLocation": { + 
"type": "string", + "enum": [ + "COOKIE", + "URI", + "BOTH" + ] + } + } + }, + "SessionPersistenceCookieLearn": { + "type": "object", + "required": [ + "type", + "create", + "lookup" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "COOKIE_LEARN" + ] + }, + "create": { + "type": "string", + "pattern": "^\\$.+" + }, + "lookup": { + "type": "string", + "pattern": "^\\$.+" + } + } + }, + "SessionPersistence": { + "description": "SessionPersistence settings in a Web workloadGroup.", + "discriminator": { + "propertyName": "type", + "mapping": { + "COOKIE": "#/components/schemas/SessionPersistenceCookie", + "ROUTE": "#/components/schemas/SessionPersistenceRoute", + "COOKIE_LEARN": "#/components/schemas/SessionPersistenceCookieLearn" + } + }, + "oneOf": [ + { + "$ref": "#/components/schemas/SessionPersistenceCookie" + }, + { + "$ref": "#/components/schemas/SessionPersistenceRoute" + }, + { + "$ref": "#/components/schemas/SessionPersistenceCookieLearn" + } + ] + }, + "WorkloadUri": { + "type": "object", + "properties": { + "weight": { + "type": "integer", + "minimum": 1, + "default": 1 + }, + "maxConns": { + "type": "integer", + "minimum": 0, + "default": 0 + }, + "maxFails": { + "type": "integer", + "minimum": 0, + "default": 1 + }, + "failTimeout": { + "x-f5-experimental": true, + "type": "string", + "default": "10s", + "pattern": "^[0-9]+[h|m|s]{1}$" + }, + "isBackup": { + "type": "boolean", + "default": false + }, + "isDown": { + "type": "boolean", + "default": false + }, + "route": { + "x-f5-experimental": true, + "type": "string" + }, + "srvService": { + "type": "string" + }, + "slowStart": { + "x-f5-experimental": true, + "type": "integer", + "minimum": 0, + "default": 0 + }, + "isDrain": { + "type": "boolean", + "default": false + } + } + }, + "WebWorkloadGroup": { + "description": "Group of servers hosting a part of a Web application represented by a Component.", + "allOf": [ + { + "$ref": "#/components/schemas/WorkloadGroupCommon" + }, + { + 
"type": "object", + "properties": { + "proxy": { + "$ref": "#/components/schemas/WebProxy" + }, + "sessionPersistence": { + "$ref": "#/components/schemas/SessionPersistence" + }, + "uris": { + "type": "object", + "description": "The URI for a server hosting a part of a Web application.\n\nIt must conform to the format `schema://address[:port]`\nwhere schema is chosen from http or https, address is IP or hostname,\nschema and address must be provided.\n\nFor example:\n\n- `http://192.0.2.247`\n- `https://192.0.2.247:8443`\n- `https://www.f5workload.com`\n", + "additionalProperties": { + "$ref": "#/components/schemas/WorkloadUri" + } + } + } + } + ] + }, + "BackendBuffers": { + "description": "Proxy buffer settings applicable to servers across all Web workloadGroups associated with a Component.", + "type": "object", + "properties": { + "headerSize": { + "description": "Sets the size of the buffer used for reading the first part of the response received from the proxied server.", + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size" + } + }, + "isEnabled": { + "description": "Enables or disables buffering of responses from the proxied server.", + "type": "boolean", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering" + } + }, + "size": { + "description": "Sets the number and size of the buffers used for reading a response from the proxied server, for a single connection.", + "type": "object", + "properties": { + "number": { + "type": "integer" + }, + "size": { + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$" + } + }, + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffers" + } + }, + "busySize": { + "description": "When buffering of responses from the proxied server is enabled, it limits the total size of buffers that can be busy sending a response to the 
client while the response is not yet fully read.", + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_busy_buffers_size" + } + }, + "ignoreClientAbort": { + "description": "Determines whether the connection with a proxied server should be closed when a client closes the connection without waiting for a response.", + "type": "string", + "enum": [ + "ENABLED", + "DISABLED" + ], + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_client_abort" + } + }, + "forceRanges": { + "description": "Enables byte-range support for both cached and uncached responses from the proxied server regardless of the \"Accept-Ranges\" field in these responses.", + "type": "string", + "enum": [ + "ENABLED", + "DISABLED" + ], + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_force_ranges" + } + }, + "httpVersion": { + "description": "This field is deprecated. 
Use 'Backend.httpVersion' to set the desired HTTP Version.", + "type": "string", + "enum": [ + "1.0", + "1.1" + ], + "deprecated": true, + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version" + } + }, + "rate": { + "description": "Limits the speed (in bytes per second) of reading the response from the proxied server.\nDisables rate limiting when set to `0`.\n", + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_limit_rate" + } + }, + "readTimeout": { + "description": "Defines a timeout for reading a response from the proxied server.", + "type": "string", + "pattern": "^[0-9]{1,12}[hms]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout" + } + }, + "maxTempFileSize": { + "description": "Sets the maximum size of the temporary file that the response can be saved into. Note that the buffering of responses from proxied server is enabled and the response does not fit into the buffers.\n\nDisables temporary file usage when set to 0.\n", + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_max_temp_file_size" + } + }, + "tempFileWriteSize": { + "description": "Limits the size of data written to a temporary file at a time, when buffering of responses from the proxied server to temporary files is enabled.\n\nDisables temporary file usage when set to 0.\n", + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_temp_file_write_size" + } + } + } + }, + "WebBackend": { + "description": "Backend settings in a Web Component.\n", + "type": "object", + "properties": { + "keepAlive": { + "type": "object", + "properties": { + "connections": { + "type": "integer", + "minimum": 0, + 
"default": 0 + }, + "requestsPerConn": { + "type": "integer" + }, + "idleTimeout": { + "type": "string", + "pattern": "^[0-9]+[h|m|s]{1}$", + "example": "1h" + } + } + }, + "monitoring": { + "$ref": "#/components/schemas/WebMonitoring" + }, + "workloadGroups": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/WebWorkloadGroup" + } + }, + "preserveHostHeader": { + "x-f5-experimental": true, + "$ref": "#/components/schemas/ServiceConfigState" + }, + "queue": { + "x-f5-experimental": true, + "type": "object", + "required": [ + "length" + ], + "properties": { + "length": { + "type": "integer" + }, + "timeOut": { + "type": "string", + "pattern": "^[0-9]+[h|m|s]{1}$", + "example": "1h" + } + } + }, + "httpVersion": { + "description": "Sets the HTTP protocol version for proxying.", + "type": "string", + "pattern": "^[1-3]{1}\\.[0-1]{1}$", + "example": 1 + }, + "ntlmAuthentication": { + "description": "Allows proxying requests with NTLM Authentication.", + "type": "string", + "enum": [ + "ENABLED", + "DISABLED" + ], + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_upstream_module.html#ntlm" + } + }, + "persistentState": { + "x-f5-experimental": true, + "type": "string" + }, + "buffers": { + "$ref": "#/components/schemas/BackendBuffers" + }, + "isSocketKeepaliveEnabled": { + "description": "Configures the “TCP keepalive” behavior for outgoing connections to a proxied server.", + "type": "boolean", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_socket_keepalive" + } + }, + "ignoreHeaders": { + "description": "Disables processing of certain response header fields from the proxied server.", + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "X-Accel-Redirect", + "X-Accel-Expires", + "X-Accel-Limit-Rate", + "X-Accel-Buffering", + "X-Accel-Charset", + "Expires", + "Cache-Control", + "Set-Cookie", + "Vary" + ], + "externalDocs": { + "url": 
"https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers" + } + }, + "debugHeaders": { + "description": "Permits passing otherwise disabled header fields from a proxied server to a client.", + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "X-Accel-Redirect", + "X-Accel-Expires", + "X-Accel-Limit-Rate", + "X-Accel-Buffering", + "X-Accel-Charset", + "Expires", + "Cache-Control", + "Set-Cookie", + "Vary" + ], + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass_header" + } + }, + "tls": { + "description": "TLS settings applicable to servers in Web workloadGroups.", + "type": "object", + "properties": { + "cipher": { + "description": "Specifies the enabled ciphers for requests to a proxied HTTPS server.", + "type": "string", + "example": "ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP;", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_ciphers" + } + }, + "name": { + "description": "Allows overriding the server name used to verify the certificate of the proxied HTTPS server.", + "type": "string", + "example": "$proxy_host", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_name" + } + }, + "protocols": { + "description": "Enables the specified protocols for requests to a proxied HTTPS server.", + "type": "array", + "items": { + "type": "string", + "pattern": "TLSv1|TLSv1\\.[1-3]|SSLv2|SSLv3" + }, + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_protocols" + } + }, + "isServerNameEnabled": { + "description": "Enables or disables passing of the server name through TLS Server Name Indication extension (SNI, RFC 6066) when establishing a connection with the proxied HTTPS server.", + "type": "boolean", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_server_name" + } + }, + 
"isSessionReuseEnabled": { + "description": "Determines whether SSL sessions can be reused when working with the proxied server.", + "type": "boolean", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_session_reuse" + } + }, + "isVerifiedEnabled": { + "description": "Enables or disables verification of the proxied HTTPS server certificate.", + "type": "boolean", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_verify" + } + }, + "verifyDepth": { + "description": "Sets the verification depth in the proxied HTTPS server certificates chain.", + "type": "integer", + "minimum": 0, + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_verify_depth" + } + } + } + } + } + }, + "UriRewrite": { + "type": "object", + "required": [ + "incomingPattern", + "rewritePattern" + ], + "properties": { + "applicableURIs": { + "$ref": "#/components/schemas/ApplicableUris" + }, + "incomingPattern": { + "type": "string", + "description": "The regex pattern to match against the request URIs that are expected to be rewritten." + }, + "rewritePattern": { + "type": "string", + "description": "The replacement regex pattern to apply to the URIs that are to be rewritten.", + "minLength": 1 + }, + "afterExecute": { + "type": "string", + "enum": [ + "NONE", + "LAST", + "BREAK", + "REDIRECT", + "PERMANENT" + ], + "default": "BREAK" + } + } + }, + "UriRedirect": { + "type": "object", + "required": [ + "responseCode" + ], + "properties": { + "applicableURIs": { + "$ref": "#/components/schemas/ApplicableUris" + }, + "responseCode": { + "type": "integer", + "minimum": 300, + "maximum": 308 + }, + "url": { + "type": "string", + "minLength": 1, + "description": "The return url to use for responses in the 301-308 range." + }, + "text": { + "type": "string", + "description": "The return text to send for 300 responses." 
+ } + } + }, + "ProgrammabilityAction": { + "type": "string", + "enum": [ + "ADD", + "MODIFY", + "DELETE" + ] + }, + "CookieModification": { + "type": "object", + "required": [ + "action", + "cookieName" + ], + "properties": { + "action": { + "$ref": "#/components/schemas/ProgrammabilityAction" + }, + "cookieName": { + "type": "string" + }, + "cookieValue": { + "type": "string" + } + } + }, + "ProgrammabilityAddRequestHeader": { + "type": "object", + "required": [ + "action", + "headerName", + "headerValue" + ], + "properties": { + "action": { + "type": "string", + "description": "Adds request header details.", + "enum": [ + "ADD" + ] + }, + "headerName": { + "type": "string", + "description": "The name of the response header to modify.", + "minLength": 1 + }, + "headerValue": { + "type": "string", + "description": "The value to apply to the request header.", + "minLength": 1 + }, + "applicableURIs": { + "$ref": "#/components/schemas/ApplicableUris" + } + } + }, + "ProgrammabilityDeleteRequestHeader": { + "type": "object", + "required": [ + "action", + "headerName" + ], + "properties": { + "action": { + "type": "string", + "description": "Deletes request header details.", + "enum": [ + "DELETE" + ] + }, + "headerName": { + "type": "string", + "description": "The name of the request header to modify.", + "minLength": 1 + }, + "applicableURIs": { + "$ref": "#/components/schemas/ApplicableUris" + } + } + }, + "RequestHeaderModification": { + "type": "object", + "discriminator": { + "propertyName": "action", + "mapping": { + "ADD": "#/components/schemas/ProgrammabilityAddRequestHeader", + "DELETE": "#/components/schemas/ProgrammabilityDeleteRequestHeader" + } + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ProgrammabilityAddRequestHeader" + }, + { + "$ref": "#/components/schemas/ProgrammabilityDeleteRequestHeader" + } + ] + }, + "ProgrammabilityAddResponseHeader": { + "type": "object", + "required": [ + "action", + "headerName", + "headerValue" + ], + 
"properties": { + "action": { + "type": "string", + "description": "Adds response header details.", + "enum": [ + "ADD" + ] + }, + "headerName": { + "type": "string", + "description": "The name of the response header to modify.", + "minLength": 1 + }, + "headerValue": { + "type": "string", + "description": "The value to apply to the response header.", + "minLength": 1 + }, + "responseCodeFilter": { + "type": "string", + "description": "The value to apply to the response code filter.", + "enum": [ + "ALWAYS", + "PRE_DEFINED" + ], + "default": "PRE_DEFINED" + }, + "applicableURIs": { + "$ref": "#/components/schemas/ApplicableUris" + } + } + }, + "ProgrammabilityDeleteResponseHeader": { + "type": "object", + "required": [ + "action", + "headerName" + ], + "properties": { + "action": { + "type": "string", + "description": "Deletes response header details.", + "enum": [ + "DELETE" + ] + }, + "headerName": { + "type": "string", + "description": "The name of the response header to modify.\n\n> Warning: `Date`, `Content-Length`, and `Connection` headers cannot be deleted.\n", + "minLength": 1 + }, + "applicableURIs": { + "$ref": "#/components/schemas/ApplicableUris" + } + } + }, + "ResponseHeaderModification": { + "type": "object", + "discriminator": { + "propertyName": "action", + "mapping": { + "ADD": "#/components/schemas/ProgrammabilityAddResponseHeader", + "DELETE": "#/components/schemas/ProgrammabilityDeleteResponseHeader" + } + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ProgrammabilityAddResponseHeader" + }, + { + "$ref": "#/components/schemas/ProgrammabilityDeleteResponseHeader" + } + ] + }, + "Programmability": { + "type": "object", + "properties": { + "uriRewrites": { + "type": "array", + "items": { + "$ref": "#/components/schemas/UriRewrite" + } + }, + "httpHttpsRedirect": { + "x-f5-experimental": true, + "$ref": "#/components/schemas/ServiceConfigState" + }, + "uriRedirects": { + "x-f5-experimental": true, + "type": "array", + "items": { + "$ref": 
"#/components/schemas/UriRedirect" + } + }, + "cookieModifications": { + "x-f5-experimental": true, + "type": "array", + "items": { + "$ref": "#/components/schemas/CookieModification" + } + }, + "requestHeaderModifications": { + "type": "array", + "items": { + "$ref": "#/components/schemas/RequestHeaderModification" + } + }, + "responseHeaderModifications": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResponseHeaderModification" + } + } + } + }, + "Logging": { + "description": "Settings for error logs and access logs.", + "type": "object", + "properties": { + "errorLog": { + "$ref": "#/components/schemas/ServiceConfigState" + }, + "accessLog": { + "type": "object", + "properties": { + "state": { + "$ref": "#/components/schemas/ServiceConfigState" + }, + "format": { + "type": "string" + } + } + } + } + }, + "WAF": { + "description": "Defines the desired configurations for the WAF associated with the application component.", + "properties": { + "isEnabled": { + "type": "boolean", + "default": true, + "description": "Indicates whether the WAF is enabled or not." + }, + "isMonitorOnly": { + "type": "boolean", + "default": true, + "description": "Indicates whether the WAF will monitor or block security violations." 
+ }, + "signatureOverrides": { + "type": "object", + "additionalProperties": { + "type": "object", + "properties": { + "action": { + "type": "string", + "enum": [ + "IGNORE" + ] + } + }, + "description": "Identifies overrides for the signatures contained within the associated security strategy.\n", + "example": { + "1234": { + "action": "IGNORE" + }, + "1235": { + "action": "IGNORE" + } + } + } + } + } + }, + "APIKeyClientAuth": { + "description": "Defines how an API client should provide their API Key credentials.", + "required": [ + "keyLocation" + ], + "properties": { + "keyLocation": { + "type": "string", + "enum": [ + "HEADER", + "QUERY_PARAM" + ] + }, + "key": { + "type": "string" + } + } + }, + "JWTClientAuth": { + "description": "Defines how an API Client should provide their JWT.", + "required": [ + "keyLocation" + ], + "properties": { + "keyLocation": { + "type": "string", + "enum": [ + "BEARER", + "HEADER", + "QUERY_PARAM", + "COOKIE" + ] + }, + "key": { + "type": "string" + } + } + }, + "ConditionalAuthPolicy": { + "description": "Defines further fine-grained access control on top of API Key or JWT Auth.", + "required": [ + "sourceType", + "comparisonType", + "comparisonValues", + "action", + "denyStatusCode" + ], + "properties": { + "sourceType": { + "type": "string", + "enum": [ + "HEADER", + "JWT_CLAIM" + ] + }, + "sourceKey": { + "type": "string" + }, + "comparisonType": { + "type": "string", + "enum": [ + "EQUALS", + "NOT_EQUALS", + "IN", + "CONTAINS" + ] + }, + "comparisonValues": { + "type": "array", + "description": "Valid values for the sourceType.", + "items": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ] + } + }, + "action": { + "type": "string", + "enum": [ + "ALLOW", + "DENY" + ] + }, + "denyStatusCode": { + "type": "integer" + } + } + }, + "RateLimit": { + "required": [ + "rate" + ], + "properties": { + "rate": { + "type": "string", + "pattern": "^[0-9]+r\\/[m|s]{1}$", + "description": "Sets the maximum number 
of allowed requests.\n\nYou can specify the rate limit as requests per second (r/s) or requests per minute (r/m).\n", + "example": "10r/s" + }, + "burstBeforeReject": { + "type": "integer", + "minimum": 0 + }, + "burstBeforeDelay": { + "type": "integer", + "minimum": 0 + }, + "statusCode": { + "type": "integer", + "default": 429 + }, + "key": { + "type": "string", + "default": "$binary_remote_addr", + "description": "Parameters (NGINX variable) for a shared memory zone that stores states for various keys; used for `limit_req_zone`." + } + } + }, + "Security": { + "type": "object", + "description": "Defines the desired security configurations for the application component.", + "properties": { + "strategyRef": { + "$ref": "#/components/schemas/ResourceRef" + }, + "waf": { + "$ref": "#/components/schemas/WAF" + }, + "identityProviderRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + }, + "description": "The list of Identity Providers that are used in this Security policy." 
+ }, + "apiKeyClientAuth": { + "$ref": "#/components/schemas/APIKeyClientAuth" + }, + "jwtClientAuth": { + "$ref": "#/components/schemas/JWTClientAuth" + }, + "conditionalAuthPolicies": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/ConditionalAuthPolicy" + } + }, + "rateLimits": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/RateLimit" + } + }, + "interceptWorkloadErrors": { + "x-f5-experimental": true, + "$ref": "#/components/schemas/ServiceConfigState" + } + } + }, + "InstanceGroupRef": { + "allOf": [ + { + "$ref": "#/components/schemas/ResourceRef" + }, + { + "type": "object", + "properties": { + "listenIps": { + "description": "The list of Listen IP addresses.\nSets the BIG-IP virtual address(es) on which the server listens for and accepts requests.\n", + "type": "array", + "items": { + "type": "string", + "pattern": "^(?:(?:25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\.){3}(?:25[0-5]|2[0-4]\\d|[01]?\\d\\d?)$", + "example": "1.1.1.1" + } + } + } + } + ] + }, + "Placement": { + "description": "Instances that have NGINX configuration applied corresponding to the Gateway and associated Components settings.", + "type": "object", + "properties": { + "instanceRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/InstanceRef" + } + }, + "instanceGroupRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/InstanceGroupRef" + } + } + } + }, + "GatewayIngressCommon": { + "description": "Ingress settings in a Gateway that apply to Web and TCP/UDP Components.", + "allOf": [ + { + "$ref": "#/components/schemas/GatewaySocketSettings" + }, + { + "type": "object", + "required": [ + "uris", + "placement" + ], + "properties": { + "uris": { + "$ref": "#/components/schemas/IngressUris" + }, + "tls": { + "$ref": "#/components/schemas/TLS" + }, + "placement": { + "$ref": "#/components/schemas/Placement" + } + } + } + ] + }, + "HeaderMatchMethod": { + "type": "string", + "enum": [ + 
"PREFIX", + "REGEX", + "REGEX_CASE_SENSITIVE", + "SUFFIX", + "EXACT" + ], + "default": "REGEX" + }, + "IngressHeader": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "nameMatchMethod": { + "$ref": "#/components/schemas/HeaderMatchMethod" + }, + "value": { + "type": "string" + }, + "valueMatchMethod": { + "$ref": "#/components/schemas/HeaderMatchMethod" + } + } + }, + "WebIngressCommon": { + "description": "Ingress settings in a Gateway and Component that apply only to Web Components.", + "type": "object", + "properties": { + "methods": { + "description": "Specifies the HTTP method to use in requests.", + "type": "array", + "items": { + "type": "string", + "enum": [ + "POST", + "GET", + "PUT", + "DELETE", + "PATCH", + "HEAD", + "TRACE", + "OPTIONS", + "CONNECT" + ] + } + }, + "clientMaxBodySize": { + "description": "Sets the maximum allowed size of the client request body, specified in the “Content-Length” request header field.", + "type": "string", + "pattern": "^[0-9]+[k|K|m|M]{1}$", + "deprecated": true, + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size" + } + }, + "headers": { + "description": "Specifies the match method for headers to be used in requests.", + "x-f5-experimental": true, + "type": "array", + "items": { + "$ref": "#/components/schemas/IngressHeader" + } + }, + "http2": { + "description": "Enable or disable HTTP/2 connections on the port. 
Normally, for this to work the `ssl` parameter should be specified as well,\nbut NGINX can also be configured to accept HTTP/2 connections without SSL.\nPossible values are `ENABLED` or `DISABLED`.\n", + "x-f5-experimental": true, + "$ref": "#/components/schemas/ServiceConfigState", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#listen" + } + }, + "spdy": { + "description": "Enables or disables acceptance of the SPDY connections on the specified port.\nNormally, for this to work the `ssl` parameter should be specified as well,\nbut NGINX can also be configured to accept SPDY connections without SSL. Possible values are `ENABLED` or `DISABLED`.\n", + "x-f5-experimental": true, + "$ref": "#/components/schemas/ServiceConfigState", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#listen" + } + }, + "proxyProtocol": { + "description": "Enables or disables the proxy protocol for all connections accepted on the specified port.\nThe `proxy protocol` enables NGINX and NGINX Plus to receive client connection information passed through proxy servers and load balancers,\nsuch as HAproxy and Amazon Elastic Load Balancer (ELB). 
The possible values are `ENABLED` or `DISABLED`.\n", + "x-f5-experimental": true, + "$ref": "#/components/schemas/ServiceConfigState", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#listen" + } + }, + "notFoundStatusCode": { + "x-f5-experimental": true, + "type": "integer", + "default": 404 + }, + "headersHashBucketSize": { + "description": "Sets the bucket size for hash tables used by the `proxy_hide_header` and `proxy_set_header` directives.", + "type": "integer", + "minimum": 1, + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_bucket_size" + } + }, + "headersHashMaxSize": { + "description": "Sets the maximum size of hash tables used by the `proxy_hide_header` and `proxy_set_header` directives.", + "type": "integer", + "minimum": 1, + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_max_size" + } + } + } + }, + "GatewayWebIngressClient": { + "description": "Non-buffer settings in a Gateway applicable to Web client requests.", + "type": "object", + "properties": { + "bodyInFileOnly": { + "description": "Determines whether NGINX Controller should save the entire client request body into a file.", + "type": "string", + "enum": [ + "ENABLED", + "DISABLED", + "CLEAN" + ], + "default": "DISABLED", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_in_file_only" + } + }, + "bodyTimeout": { + "description": "Defines a timeout for reading the client request body.", + "type": "string", + "pattern": "^[0-9]{1,12}[hms]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_timeout" + } + }, + "headerTimeout": { + "description": "Defines a timeout for reading the client request header.", + "type": "string", + "pattern": "^[0-9]{1,12}[hms]?$", + "externalDocs": { + "url": 
"https://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_timeout" + } + }, + "maxBodySize": { + "description": "Sets the maximum allowed size of the client request body, specified in the “Content-Length” request header field.\n\nDisables checking of client request body size when set to 0.\n", + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size" + } + } + } + }, + "WebIngressBuffersCommon": { + "description": "Buffer settings common to a Gateway and Component applicable to web client requests.", + "type": "object", + "properties": { + "clientBodyBufferingIsEnabled": { + "description": "Enables or disables buffering of a client request body.", + "type": "boolean", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering" + } + }, + "clientBodyBufferSize": { + "description": "Sets the buffer size for reading the client request body.", + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size" + } + }, + "clientBodyInSingleBuffer": { + "description": "Determines whether NGINX Controller should save the entire client request body in a single buffer.", + "type": "string", + "enum": [ + "ENABLED", + "DISABLED" + ], + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_in_single_buffer" + } + } + } + }, + "GatewayWebIngress": { + "description": "Ingress settings in a Gateway that apply only to Web Components.", + "type": "object", + "properties": { + "client": { + "$ref": "#/components/schemas/GatewayWebIngressClient" + }, + "allowUnderscoresInHeaders": { + "type": "boolean", + "default": false, + "description": "Allows the use of underscores in client request header fields.\n\nWhen set to `disabled`, request headers with names that contain 
underscores are considered invalid and are ignored.\n" + }, + "buffers": { + "allOf": [ + { + "$ref": "#/components/schemas/WebIngressBuffersCommon" + }, + { + "type": "object", + "properties": { + "clientHeaderBufferSize": { + "description": "Sets the buffer size for reading the client request header.", + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_buffer_size" + } + } + } + } + ] + } + } + }, + "GatewayIngress": { + "description": "Ingress settings in a Gateway.", + "allOf": [ + { + "$ref": "#/components/schemas/GatewayIngressCommon" + }, + { + "$ref": "#/components/schemas/WebIngressCommon" + }, + { + "$ref": "#/components/schemas/GatewayWebIngress" + } + ] + }, + "GatewayDesiredState": { + "description": "The desired gateway settings that the user wants in the configuration on NGINX instances referenced by this Gateway.", + "allOf": [ + { + "$ref": "#/components/schemas/GatewayWebState" + }, + { + "$ref": "#/components/schemas/GatewayStateCommon" + }, + { + "type": "object", + "required": [ + "ingress" + ], + "properties": { + "ingress": { + "$ref": "#/components/schemas/GatewayIngress" + } + } + } + ] + }, + "CertMetadata": { + "type": "object", + "description": "Public certificate metadata.", + "required": [ + "authorityKeyIdentifier", + "commonName", + "expired", + "expiry", + "issuer", + "publicKeyType", + "serialNumber", + "signatureAlgorithm", + "subject", + "subjectAlternativeName", + "subjectKeyIdentifier", + "thumbprint", + "thumbprintAlgorithm", + "validFrom", + "validTo", + "version" + ], + "properties": { + "authorityKeyIdentifier": { + "type": "string", + "example": "2B D0 69 47 94 76 09 FE F4 6B 8D 2E 40 A6 F7 47 4D 7F 08 5E", + "description": "The identifier of the signing authority for the certificate." + }, + "commonName": { + "type": "string", + "example": "www.myapp.com", + "description": "The Common Name (CN) for the certificate. 
This is typically a Fully Qualified Domain Name (FQDN), and must be the same as the web address users access when connecting to a web site." + }, + "expired": { + "type": "boolean", + "example": false, + "description": "Indicates the expiration status of the certificate." + }, + "expiry": { + "type": "integer", + "example": 35500034, + "description": "The number of seconds until the certificate will expire." + }, + "issuer": { + "type": "string", + "example": "DigiCert Class 3 Extended Validation SSL SGC CA.", + "description": "Identifies the entity who signed and issued the certificate." + }, + "publicKeyType": { + "type": "string", + "example": "RSA (2048 Bits)", + "description": "Identifies the encryption algorithm used to create the public key for the ceritficate." + }, + "serialNumber": { + "type": "string", + "example": "16469416336579571270", + "description": "A unique identifier for the certificate." + }, + "signatureAlgorithm": { + "type": "string", + "example": "SHA-256", + "description": "Identifies the algorithm used to sign the certificate." + }, + "subject": { + "type": "string", + "example": "www.myapp.com", + "description": "Contains the Distinguished Name (DN) information for the certificate." + }, + "subjectAlternativeName": { + "type": "string", + "example": "DNS Name=static.xxxx", + "description": "Defines additional identifies bound to the subject of the certificate. For example, the DNS name is used to add addtional domain names to a certificate." + }, + "subjectKeyIdentifier": { + "type": "string", + "example": "31 EA 76 A9 23 74 A5 DF D4 FD EE A0 C1 A6 9E C6 11 0E 11 EC", + "description": "A hash value of the SSL certificate that can be used to identify certificates that contain a particular public key." + }, + "thumbprint": { + "type": "string", + "example": "E6 A7 87 96 E0 C7 A3 E5 43 78 35 CA 16 78 5B 48 5A A9 DD C4 5C CD 0A 65 AA 89 33 E3 C3 D0 89 71", + "description": "A hash to ensure that the certificate has not been modified." 
+ }, + "thumbprintAlgorithm": { + "type": "string", + "example": "SHA-1", + "description": "Defines the algorithm used to hash the certificate." + }, + "validFrom": { + "type": "string", + "example": "2019-07-29T09:12:33.001Z", + "description": "The start of the validity period for the certificate." + }, + "validTo": { + "type": "string", + "example": "2029-07-29T09:12:33.001Z", + "description": "The end of the validity period for the certificate." + }, + "version": { + "type": "integer", + "example": 3, + "description": "The version of the certificate, typically 3 for X.509 certificates." + } + } + }, + "CertDesiredState": { + "type": "object", + "discriminator": { + "propertyName": "type", + "mapping": { + "PEM": "#/components/schemas/PEM", + "PKCS12": "#/components/schemas/PKCS12", + "REMOTE_FILE": "#/components/schemas/RemoteFile" + } + }, + "oneOf": [ + { + "$ref": "#/components/schemas/PEM" + }, + { + "$ref": "#/components/schemas/PKCS12" + }, + { + "$ref": "#/components/schemas/RemoteFile" + } + ] + }, + "CertList": { + "type": "object", + "required": [ + "items" + ], + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CertStatus" + } + } + } + }, + "CertStatus": { + "type": "object", + "required": [ + "metadata", + "desiredState", + "currentStatus" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/CertDesiredState" + }, + "currentStatus": { + "$ref": "#/components/schemas/CertCurrentStatus" + } + } + }, + "CertCurrentStatus": { + "type": "object", + "description": "'Shows the current status of the certificate.\n\nWhen any certificates have expired, the Certs service sets `state.selfConfigState.isConfigured` and `state.selfConfigState.isError` to `true`. The service will also add a value to the conditons array with the type \"expiration\" and a message that shows when the first certificate will expire. 
For example, `conditions: [{type: \"expiration\", message: \"Certificate www.example.com will expire in 29 days.\"}])`'\n", + "required": [ + "state", + "certMetadata", + "type" + ], + "properties": { + "type": { + "type": "string" + }, + "privateKey": { + "type": "string" + }, + "publicCert": { + "type": "string" + }, + "data": { + "type": "string" + }, + "password": { + "type": "string" + }, + "caCerts": { + "type": "array", + "items": { + "type": "string" + } + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "certMetadata": { + "type": "array", + "description": "Public certificate metadata.", + "items": { + "$ref": "#/components/schemas/CertMetadata" + } + } + } + }, + "Cert": { + "type": "object", + "description": "Contains the certificate to upload.", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/CertDesiredState" + } + } + }, + "PEM": { + "type": "object", + "description": "Defines a PEM-formatted certificate that contains a key and certificates.\n\nBecause JSON does not support multi-line strings, you must replace binary line feeds with ASCII line feeds (`\\n`).\nThe private key data will be redacted in the response for all get and list requests.\n", + "required": [ + "privateKey", + "publicCert", + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "PEM" + ] + }, + "password": { + "type": "string", + "example": "myPa$$w0rd", + "description": "The passphrase to use to decrypt the private key. Required if the private key is encrypted." 
+ }, + "privateKey": { + "type": "string", + "example": "-----BEGIN PRIVATE KEY-----\\n MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBALSQBtRafNJtTqN0\\n nYiZq6TZUsHjfG2R9PlK6jsvno9O6amN96Al6ZSTTDjhr4VU7/RJ0p/cisiCboCX\\n 4cCq6lFKpIpeZJI=\\n -----END PRIVATE KEY-----", + "description": "The private key used to sign the public certificate.\n\nBecause JSON does not support multi-line strings, you must replace binary line feeds with ASCII line feeds (`\\n`). The private key data will be redacted in the response for all get and list requests.\n" + }, + "publicCert": { + "type": "string", + "example": "-----BEGIN CERTIFICATE-----\\n MIICpzCCAhACCQDkjx7mP9cuRjANBgkqhkiG9w0BAQsFADCBlzELMAkGA1UEBhMC\\n MiJVGawyxDzBm2UhzNOE0ABHfjAgM6PAYmtMhhQawk6bmttXYhJeqhLSji4LEj5d\\n Z4FmXQ5rWM0RWBs=\\n -----END CERTIFICATE-----", + "description": "The end-entity certificate, in PEM format.\n\nBecause JSON does not support multi-line strings, you must replace binary line feeds with ASCII line feeds (`\\n`).\n" + }, + "caCerts": { + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "-----BEGIN CERTIFICATE-----\\n MIIE+zCCBGSgAwIBAgICAQ0wDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1Zh\\n WBsUs5iB0QQeyAfJg594RAoYC5jcdnplDQ1tgMQLARzLrUc+cb53S8wGd9D0Vmsf\\n SxOaFIqII6hR8INMqzW/Rn453HWkrugp++85j09VZw==\\n -----END CERTIFICATE-----" + ], + "description": "An optional list of intermediate certificates in PEM format that are used to validate the public certificate.\n\nBecause JSON does not support multi-line strings, you must replace binary line feeds with ASCII line feeds (`\\n`).\n" + } + } + }, + "PKCS12": { + "type": "object", + "description": "Defines a cert with key and certificates encoded in PKCS12 format.", + "required": [ + "data", + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "PKCS12" + ] + }, + "data": { + "type": "string", + "example": 
"MIIGoQIBAzCCBmcGCSqGSIb3DQEHAaCCBlgEggZUMIIGUDCCA08GCSqGSIb3DQEHBqCCA0AwggM8AgEAMIIDNQYJKoZIhvcNAQcBMBwGCiqGSIb3DQEMAQYwDgQIe7ZblBoEW3QCAggAgIIDCCgLEvzp9n69QbpGT0MDEwITAJBgUrDgMCGgUABBQJs6ZgeAMcxVLrq1hU+TlUOArMuQQIGK59vCBn0wECAggA", + "description": "A base-64-encoded string that contains a private key, a public certificate, and, optionally, other intermediate certificates." + }, + "password": { + "type": "string", + "example": "myPa$$w0rd", + "description": "The password to use to decrypt PKCS12 data." + } + } + }, + "RemoteFile": { + "type": "object", + "description": "Define a Cert resource by providing references to remote files.\n\n> **Note:** These are file path references only. The system can not validate the file contents or extract the certificate metadata. Providing a PEM or PKCS12 certificate is recommended.\n", + "required": [ + "privateKey", + "publicCert", + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "REMOTE_FILE" + ] + }, + "privateKey": { + "type": "string", + "example": "/certs/www.example.com/example.key", + "description": "The path to the private key file." + }, + "publicCert": { + "type": "string", + "example": "/certs/www.example.com/example.crt", + "description": "The path to the certificate bundle file. The file must contain the public certificate and may contain additional intermediate certificates." 
+ } + } + }, + "IdentityProviderList": { + "type": "object", + "description": "Contains a list of Identity Provider resources.", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IdentityProvider" + } + } + } + }, + "APIKeyIdentityProvider": { + "required": [ + "type" + ], + "type": "object", + "description": "Use an API key for authentication.\n\n> **Note:** Use of API Key authN is not recommended in production environments.\n", + "properties": { + "type": { + "type": "string", + "enum": [ + "API_KEY" + ] + } + } + }, + "JWTIdentityProvider": { + "required": [ + "jwkFile", + "type" + ], + "type": "object", + "description": "Use a JWT for authentication.", + "properties": { + "type": { + "type": "string", + "enum": [ + "JWT" + ] + }, + "jwkFile": { + "description": "Provide the path to - or URI for - a `.jwk` file to use for authentication.\nYou can also provide the `.jwk` file contents inline.\n", + "discriminator": { + "propertyName": "type", + "mapping": { + "INLINE": "#/components/schemas/JWKInline", + "REMOTE_FILE": "#/components/schemas/JWKRemoteFile" + } + }, + "oneOf": [ + { + "$ref": "#/components/schemas/JWKInline" + }, + { + "$ref": "#/components/schemas/JWKRemoteFile" + } + ] + } + } + }, + "IdentityProviderDesiredState": { + "required": [ + "environmentRefs", + "identityProvider" + ], + "type": "object", + "properties": { + "environmentRefs": { + "description": "The Enviroment associated with the Identity Provider.", + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "identityProvider": { + "$ref": "#/components/schemas/IdentityProviderData" + } + }, + "example": { + "environmentRefs": [ + { + "ref": "/services/environments/env1" + } + ], + "identityProvider": { + "type": "JWT", + "jwkFile": { + "type": "REMOTE_FILE", + "uri": "https://example.com/keys.jwk", + "cacheExpire": "10h" + } + } + } + }, + "IdentityProviderCurrentStatus": { + "required": [ + "environmentRefs", + 
"identityProvider", + "state" + ], + "type": "object", + "properties": { + "environmentRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "identityProvider": { + "$ref": "#/components/schemas/IdentityProviderData" + } + } + }, + "IdentityProvider": { + "required": [ + "desiredState", + "metadata" + ], + "type": "object", + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "currentStatus": { + "$ref": "#/components/schemas/IdentityProviderCurrentStatus" + }, + "desiredState": { + "$ref": "#/components/schemas/IdentityProviderDesiredState" + } + } + }, + "IdentityProviderData": { + "description": "The means of authentication used by the Identity Provider (JWT or APIKey).", + "oneOf": [ + { + "$ref": "#/components/schemas/JWTIdentityProvider" + }, + { + "$ref": "#/components/schemas/APIKeyIdentityProvider" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "API_KEY": "#/components/schemas/APIKeyIdentityProvider", + "JWT": "#/components/schemas/JWTIdentityProvider" + } + } + }, + "IdentityProviderClientDesiredState": { + "type": "object", + "properties": { + "credential": { + "$ref": "#/components/schemas/IdentityProviderClientCredential" + } + } + }, + "IdentityProviderClientCurrentStatus": { + "type": "object", + "properties": { + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "credential": { + "$ref": "#/components/schemas/IdentityProviderClientCredential" + } + } + }, + "IdentityProviderClient": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "currentStatus": { + "$ref": "#/components/schemas/IdentityProviderClientCurrentStatus" + }, + "desiredState": { + "$ref": "#/components/schemas/IdentityProviderClientDesiredState" + } + } + }, + "IdentityProviderClientCredential": { + "oneOf": [ + { + "$ref": 
"#/components/schemas/IdentityProviderAPIKey" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "API_KEY": "#/components/schemas/IdentityProviderAPIKey" + } + } + }, + "IdentityProviderClientList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IdentityProviderClient" + } + } + } + }, + "UpdateIdentityProviderClient": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "currentStatus": { + "$ref": "#/components/schemas/IdentityProviderClientCurrentStatus" + }, + "desiredState": { + "$ref": "#/components/schemas/IdentityProviderClientDesiredState" + } + } + }, + "JWK": { + "type": "object", + "properties": { + "kty": { + "type": "string", + "description": "The cryptographic algorithm family used with the key, such as \"RSA\" or \"EC\"." + }, + "use": { + "type": "string", + "description": "The intended use of the public key, whether for encrypting data or verifying the signature on data." + }, + "key_ops": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The operation(s) for which the key is intended to be used." + }, + "alg": { + "type": "string", + "description": "The algorithm intended for use with the key." + }, + "kid": { + "type": "string", + "description": "The key ID used to match a specific key." + }, + "x5u": { + "type": "string", + "description": "The X.509 URL that refers to a resource for an X.509 public key certificate or certificate chain." + }, + "x5c": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The X.509 Certificate Chain of one or more PKIX certificates. The PKIX certificate containing the key value MUST be the first certificate." + }, + "x5t": { + "type": "string", + "description": "The X.509 Certificate SHA-1 Thumbprint (a.k.a. digest) of the DER encoding of an X.509 certificate." 
+ }, + "x5t256": { + "type": "string", + "description": "The X.509 Certificate SHA-256 Thumbprint (a.k.a. digest) of the DER encoding of an X.509 certificate." + }, + "p2s": { + "type": "string", + "description": "The salt input value for PBES2 key encryption, which is used as part of the PBKDF2 salt value." + }, + "p2c": { + "type": "string", + "description": "The PBKDF2 iteration count for PBES2 key encryption, represented as a positive JSON integer. The iteration count adds computational expense, ideally compounded by the possible range of keys introduced by the salt. A minimum iteration count of 1000 is RECOMMENDED." + }, + "crv": { + "type": "string", + "description": "The cryptographic curve used for an Elliptic Curve public key." + }, + "x": { + "type": "string", + "description": "The x coordinate of the point for an Elliptic Curve public key." + }, + "y": { + "type": "string", + "description": "The y coordinate of the point for an Elliptic Curve public key." + }, + "e": { + "type": "string", + "description": "The exponent value for an RSA public key." + }, + "exp": { + "type": "string", + "description": "The exponent value for an RSA public key." + }, + "n": { + "type": "string", + "description": "The modulus value for an RSA public key." + }, + "mod": { + "type": "string", + "description": "The modulus value for an RSA public key." + }, + "d": { + "type": "string", + "description": "The private key value for an Elliptic Curve private key OR the private exponent value for an RSA private key." + }, + "p": { + "type": "string", + "description": "The first prime factor for an RSA private key." + }, + "q": { + "type": "string", + "description": "The second prime factor for an RSA private key." + }, + "dp": { + "type": "string", + "description": "The Chinese Remainder Theorem (CRT) exponent of the first factor for an RSA private key." + }, + "dq": { + "type": "string", + "description": "The CRT exponent of the second factor for an RSA private key." 
+ }, + "qi": { + "type": "string", + "description": "The CRT coefficient of the second factor for an RSA private key." + }, + "oth": { + "description": "An array of information about any third and subsequent primes, should they exist.", + "type": "array", + "items": { + "type": "object", + "properties": { + "r": { + "type": "string", + "description": "The prime factor." + }, + "d": { + "type": "string", + "description": "The factor CRT exponent of the corresponding prime factor." + }, + "t": { + "type": "string", + "description": "The factor CRT coefficient of the corresponding prime factor." + } + } + } + }, + "iv": { + "type": "string", + "description": "The base64url-encoded representation of the 96-bit Initialization Vector value used for the AES GCM key encryption operation." + }, + "tag": { + "type": "string", + "description": "The base64url-encoded representation of the 128-bit Authentication Tag value resulting from the AES GCM key encryption operation." + }, + "k": { + "type": "string", + "description": "The key value of the symmetric (or other single-valued) key." + }, + "enc": { + "type": "string", + "description": "The encryption algorithm for JWE." + }, + "epk": { + "type": "object", + "description": "The ephemeral public key value created by the originator for use in ECDH-ES key agreement algorithms." + }, + "apu": { + "type": "string", + "description": "The agreement PartyUInfo for ECDH-ES key agreement algorithms, containing information about the producer." + }, + "apv": { + "type": "string", + "description": "The agreement PartyVInfo for ECDH-ES key agreement algorithms." 
+ } + } + }, + "JWKInline": { + "required": [ + "type", + "keys" + ], + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "INLINE" + ] + }, + "keys": { + "type": "array", + "description": "The JSON Web Keys.\n", + "items": { + "$ref": "#/components/schemas/JWK" + }, + "example": [ + { + "k": "ZmFudGFzdGljand0", + "kty": "oct", + "kid": 1 + } + ] + } + }, + "description": "Inline contents of a JWK JSON file.\n" + }, + "JWKRemoteFile": { + "required": [ + "uri", + "type" + ], + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "REMOTE_FILE" + ] + }, + "uri": { + "type": "string" + }, + "cacheExpire": { + "pattern": "^[0-9]+[h|m|s]{1}$", + "type": "string", + "description": "The length of time for which to cache the remote file.\nNGINX will retrieve the file from the source URI when the cache time expires.\n", + "example": "10h" + } + } + }, + "IdentityProviderAPIKey": { + "required": [ + "type" + ], + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "API_KEY" + ] + }, + "apiKey": { + "type": "string", + "description": "If left empty, a key will automatically be generated.\nThe apikey must contain only alphanumeric characters, underscores, and hyphens.\nThe length of the apikey must be between 8 - 256 characters.\n", + "example": "ADv-2ZheQnL_jVx5klhQ39" + } + } + }, + "ListInstanceGroupsResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/InstanceGroup" + } + } + } + }, + "GetInstanceGroupResponse": { + "allOf": [ + { + "$ref": "#/components/schemas/InstanceGroup" + } + ] + }, + "InstanceGroup": { + "type": "object", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/InstanceGroupState" + }, + "currentStatus": { + "$ref": 
"#/components/schemas/InstanceGroupStatus" + } + } + }, + "InstanceGroupState": { + "type": "object", + "properties": { + "bigIpIntegration": { + "$ref": "#/components/schemas/BigIpIntegration" + }, + "locationRef": { + "description": "Reference to location all instances of the group belong to. If not specified, the default 'unspecified' is assumed.", + "$ref": "#/components/schemas/ResourceRef" + } + } + }, + "InstanceGroupStatus": { + "type": "object", + "required": [ + "state" + ], + "properties": { + "instanceRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/InstanceRef" + } + }, + "bigIpIntegration": { + "$ref": "#/components/schemas/BigIpIntegration" + }, + "locationRef": { + "description": "Reference to location all instances of the group belong to. If not specified, the default 'unspecified' is assumed.", + "$ref": "#/components/schemas/ResourceRef" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + } + } + }, + "InstanceRef": { + "type": "object", + "allOf": [ + { + "$ref": "#/components/schemas/ResourceRef" + } + ], + "description": "Reference to a member Instance resource." 
+ }, + "BigIpIntegration": { + "type": "object", + "required": [ + "integrationRef", + "serverPoolIp" + ], + "properties": { + "integrationRef": { + "description": "Reference to a BIG-IP Integration object, indicating that the Instances will be members of a BIG-IP server pool.", + "$ref": "#/components/schemas/ResourceRef" + }, + "serverPoolIp": { + "description": "The Instance IP addresses or CIDR to use when the Instance is a member of a BIG-IP server pool.\nIf this is a CIDR, then the Instance IP address that matches the mask will be the member address in the BIG-IP server pool.\nOtherwise, absolute IP addresses will be used as server-pool member addresses.\n", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "DevPortalsList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "DevPortal": { + "type": "object", + "description": "A Dev Portal.", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/DevPortalDesiredState" + }, + "currentStatus": { + "$ref": "#/components/schemas/DevPortalCurrentStatus" + } + }, + "additionalProperties": false + }, + "DevPortalDesiredState": { + "type": "object", + "required": [ + "ingress" + ], + "properties": { + "ingress": { + "type": "object", + "required": [ + "gatewayRefs" + ], + "properties": { + "gatewayRefs": { + "type": "array", + "description": "Reference to the Gateways that act as a Developer Portal.", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + }, + "additionalProperties": false + }, + "devPortalTheme": { + "$ref": "#/components/schemas/DevPortalTheme" + }, + "publishedApiRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "devPortalType": { + "type": "string", + "enum": [ + "private", + "public", + 
"partner" + ] + } + }, + "additionalProperties": false + }, + "DevPortalCurrentStatus": { + "type": "object", + "properties": { + "ingress": { + "type": "object", + "required": [ + "gatewayRefs" + ], + "properties": { + "gatewayRefs": { + "type": "array", + "description": "Reference to the Gateways that act as a Developer Portal.", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + } + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "devPortalTheme": { + "$ref": "#/components/schemas/DevPortalTheme" + }, + "publishedApiRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "publishedTime": { + "type": "string", + "format": "date-time", + "example": "2019-07-29T10:12:33.001Z", + "description": "Published time is a timestamp that represents the server time when the resource was published.\nResources that have never been published do not have a `published_time` stamp.\nThe default value is language-specific and, in general, should be equivalent of the null construct.\nIn JSON format, `published_time` type is encoded as a string as described in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt).\n\nFor example: 2018-04-01T01:30:15.01Z\n" + } + } + }, + "DevPortalTheme": { + "description": "Specifies the theming for the Developer Portal.", + "type": "object", + "properties": { + "overrideDefaultTheme": { + "type": "boolean", + "example": false, + "description": "Override the default Dev Portal theme with a custom theme." 
+ }, + "customConfig": { + "type": "object", + "properties": { + "primary": { + "$ref": "#/components/schemas/ThemeConfig" + }, + "secondary": { + "$ref": "#/components/schemas/ThemeConfig" + }, + "fonts": { + "type": "object", + "properties": { + "assignments": { + "$ref": "#/components/schemas/FontAssignments" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + "brandName": { + "$ref": "#/components/schemas/BrandName" + }, + "logo": { + "$ref": "#/components/schemas/FileEncodedString" + }, + "defaultLogo": { + "$ref": "#/components/schemas/FileEncodedString" + }, + "favicon": { + "$ref": "#/components/schemas/FileEncodedString" + } + }, + "additionalProperties": false + }, + "ThemeConfig": { + "type": "object", + "properties": { + "color": { + "$ref": "#/components/schemas/ThemeConfigColors" + } + }, + "additionalProperties": false + }, + "ThemeConfigColors": { + "type": "object", + "properties": { + "primary": { + "type": "string", + "example": "#575fe6", + "description": "A CSS color string used as a primary brand theme color." + }, + "accent": { + "type": "string", + "example": "#48dbac", + "description": "A CSS color string used as an optional second brand theme color." + }, + "gray": { + "type": "string", + "example": "#1e1f27", + "description": "A CSS color string used to generate a grayscale color palette." + }, + "link": { + "type": "string", + "example": "#0f55bd", + "description": "A CSS color string used to generate anchor link colors." + }, + "fill": { + "type": "string", + "example": "#fafbfc", + "description": "A CSS color string used as the main background color." + }, + "ink": { + "type": "string", + "example": "#323441", + "description": "A CSS color string used as the main text color." 
+ }, + "status": { + "$ref": "#/components/schemas/ThemeConfigStatusColors" + } + }, + "additionalProperties": false + }, + "ThemeConfigStatusColors": { + "type": "object", + "description": "A collection of CSS color strings used to indicate status.", + "properties": { + "info": { + "type": "string", + "example": "#20a9ea", + "description": "A CSS color string used to indicate an informational status." + }, + "success": { + "type": "string", + "example": "#37c497", + "description": "A CSS color string used to indicate a success status." + }, + "error": { + "type": "string", + "example": "#ed4f54", + "description": "A CSS color string used to indicate an error status." + }, + "warning": { + "type": "string", + "example": "#ffb900", + "description": "A CSS color string used to indicate a warning status." + } + }, + "additionalProperties": false + }, + "ThemeConfigFonts": { + "type": "object", + "description": "A collection of fonts for theming typography.", + "properties": { + "headings": { + "allOf": [ + { + "$ref": "#/components/schemas/ThemeConfigFont" + } + ], + "description": "A font used for headlines." + }, + "body": { + "allOf": [ + { + "$ref": "#/components/schemas/ThemeConfigFont" + } + ], + "description": "A font used for body copy." + }, + "cta": { + "allOf": [ + { + "$ref": "#/components/schemas/ThemeConfigFont" + } + ], + "description": "A font used for calls to action." + }, + "code": { + "allOf": [ + { + "$ref": "#/components/schemas/ThemeConfigFont" + } + ], + "description": "A font used for code and syntax highlighting." + }, + "special": { + "allOf": [ + { + "$ref": "#/components/schemas/ThemeConfigFont" + } + ], + "description": "A font used for special accent typography." 
+ }, + "baseFontSize": { + "$ref": "#/components/schemas/BaseFontSize" + }, + "embeddedLink": { + "$ref": "#/components/schemas/EmbeddedLink" + } + }, + "additionalProperties": false + }, + "ThemeConfigFont": { + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "google-web-font" + ] + }, + "value": { + "type": "string" + } + }, + "additionalProperties": false + }, + "FileEncodedString": { + "type": "string", + "example": "c29tZXRoaW5nIA==", + "description": "Base64 encoded string of a logo." + }, + "BrandName": { + "type": "string", + "example": "Acme", + "description": "Name of the brand." + }, + "BaseFontSize": { + "type": "integer", + "example": 14 + }, + "EmbeddedLink": { + "type": "string" + }, + "FontAssignments": { + "type": "object", + "$ref": "#/components/schemas/ThemeConfigFonts" + }, + "EnvironmentName": { + "type": "string" + }, + "EnvironmentList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "Environment": { + "type": "object", + "description": "An Environment is a logical container that you can use to organize your Apps. 
A few commonly-used examples of Environments are \"dev\" and \"production\".", + "required": [ + "metadata" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "x-f5-experimental": true, + "type": "object" + }, + "currentStatus": { + "$ref": "#/components/schemas/EnvironmentCurrentStatus" + } + } + }, + "EnvironmentCurrentStatus": { + "type": "object", + "properties": { + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "gatewayRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "appRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "certRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + } + }, + "AppName": { + "type": "string" + }, + "APIDefinition": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "currentStatus": { + "$ref": "#/components/schemas/APIDefinitionCurrentStatus" + } + } + }, + "APIDefinitionCurrentStatus": { + "type": "object", + "properties": { + "apiDefinitionVersionRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + } + }, + "APIDefinitionList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/APIDefinition" + } + } + } + }, + "APIDefinitionSpecMapping": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/components/schemas/RESTAPISpec" + }, + { + "$ref": "#/components/schemas/gRPCProxySpec" + } + ] + } + }, + "RESTAPISpec": { + "type": "object", + "description": "Validates an Imported OpenAPI 3 spec formatted as JSON using the [OAS v3 schema.yaml](https://github.com/OAI/OpenAPI-Specification/blob/master/schemas/v3.0/schema.yaml) specification.\n" + }, + "gRPCProxySpec": { + "x-f5-experimental": true, + "type": "object", + "description": 
"Validates an Imported gRPC spec.\n" + }, + "APIDefinitionVersion": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/components/schemas/APIDefinitionVersionMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/APIDefinitionVersionDesiredState" + }, + "currentStatus": { + "$ref": "#/components/schemas/APIDefinitionVersionCurrentStatus" + } + } + }, + "APIDefinitionVersionList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/APIDefinitionVersion" + } + } + } + }, + "APIDefinitionVersionDesiredState": { + "type": "object", + "properties": { + "specs": { + "$ref": "#/components/schemas/APIDefinitionSpecMapping" + } + } + }, + "APIDefinitionVersionCurrentStatus": { + "type": "object", + "properties": { + "specs": { + "$ref": "#/components/schemas/APIDefinitionSpecMapping" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "publishedApiRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + } + }, + "apiDefinitionVersionRef": { + "type": "object", + "properties": { + "ref": { + "type": "string", + "description": "Reference to the Version of the API Definition.\n", + "example": "/services/api-definitions/baseball-stats/versions/v1" + }, + "links": { + "$ref": "#/components/schemas/NamedLinks" + } + } + }, + "APIDefinitionVersionMeta": { + "allOf": [ + { + "$ref": "#/components/schemas/ResourceMeta" + }, + { + "type": "object", + "properties": { + "isDefaultVersion": { + "type": "boolean" + } + } + } + ] + }, + "Instance": { + "type": "object", + "description": "An NGINX Instance.", + "required": [ + "metadata" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "currentStatus": { + "$ref": "#/components/schemas/InstanceCurrentStatus" + }, + "desiredState": { + "$ref": "#/components/schemas/InstanceDesiredState" + } + } + }, + "GetInstanceResponse": { + "allOf": [ + { + "$ref": 
"#/components/schemas/Instance" + } + ] + }, + "ListInstanceResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Instance" + } + } + } + }, + "InstanceRequest": { + "allOf": [ + { + "$ref": "#/components/schemas/Instance" + } + ], + "description": "Describes the Instance to update." + }, + "InstanceDesiredState": { + "oneOf": [ + { + "$ref": "#/components/schemas/OtherInstanceDesiredState" + }, + { + "$ref": "#/components/schemas/AWSInstanceDesiredState" + }, + { + "$ref": "#/components/schemas/AzureInstanceDesiredState" + }, + { + "$ref": "#/components/schemas/VSphereInstanceDesiredState" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "OTHER_INSTANCE": "#/components/schemas/OtherInstanceDesiredState", + "AWS_INSTANCE": "#/components/schemas/AWSInstanceDesiredState", + "AZURE_INSTANCE": "#/components/schemas/AzureInstanceDesiredState", + "VSPHERE_INSTANCE": "#/components/schemas/VSphereInstanceDesiredState" + } + } + }, + "InstanceCurrentStatus": { + "oneOf": [ + { + "$ref": "#/components/schemas/OtherInstanceCurrentStatus" + }, + { + "$ref": "#/components/schemas/AWSInstanceCurrentStatus" + }, + { + "$ref": "#/components/schemas/AzureInstanceCurrentStatus" + }, + { + "$ref": "#/components/schemas/VSphereInstanceCurrentStatus" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "OTHER_INSTANCE": "#/components/schemas/OtherInstanceCurrentStatus", + "AWS_INSTANCE": "#/components/schemas/AWSInstanceCurrentStatus", + "AZURE_INSTANCE": "#/components/schemas/AzureInstanceCurrentStatus", + "VSPHERE_INSTANCE": "#/components/schemas/VSphereInstanceCurrentStatus" + } + } + }, + "OtherInstanceDesiredState": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "description": "OTHER_INSTANCE is an Instance pre-installed and self-registered during NGINX installation.\n", + "enum": [ + "OTHER_INSTANCE" + ] + 
}, + "nginx": { + "$ref": "#/components/schemas/Nginx" + } + } + }, + "AWSInstanceDesiredState": { + "type": "object", + "required": [ + "type", + "templateRef" + ], + "properties": { + "type": { + "type": "string", + "description": "AWS_INSTANCE is an Instance hosted in Amazon Web Services (AWS).\n", + "enum": [ + "AWS_INSTANCE" + ] + }, + "templateRef": { + "$ref": "#/components/schemas/TemplateRef" + }, + "nginx": { + "$ref": "#/components/schemas/Nginx" + } + } + }, + "AzureInstanceDesiredState": { + "type": "object", + "required": [ + "type", + "templateRef" + ], + "properties": { + "type": { + "type": "string", + "description": "AZURE_INSTANCE is an Instance hosted in Microsoft Azure.", + "enum": [ + "AZURE_INSTANCE" + ] + }, + "templateRef": { + "$ref": "#/components/schemas/TemplateRef" + }, + "nginx": { + "$ref": "#/components/schemas/Nginx" + } + } + }, + "VSphereInstanceDesiredState": { + "type": "object", + "required": [ + "type", + "templateRef" + ], + "properties": { + "type": { + "type": "string", + "description": "An Instance that resides on a VSphere host.\n", + "enum": [ + "VSPHERE_INSTANCE" + ] + }, + "templateRef": { + "$ref": "#/components/schemas/TemplateRef" + }, + "nginx": { + "$ref": "#/components/schemas/Nginx" + } + } + }, + "OtherInstanceCurrentStatus": { + "type": "object", + "description": "Contains the current status of the Other Instance.", + "required": [ + "type", + "hostname", + "version", + "agent", + "status", + "muted" + ], + "properties": { + "hostname": { + "type": "string", + "description": "The hostname of the Instance." + }, + "version": { + "type": "string", + "description": "The version of NGINX running on the Instance." 
+ }, + "muted": { + "type": "boolean", + "description": "Indicates the status of notifications and alerts.\n- 'True' means that notifications and alerts are muted.\n- 'False' means that notifications and alerts are active.\n", + "deprecated": true + }, + "type": { + "type": "string", + "description": "OTHER_INSTANCE is an Instance pre-installed and self-registered during NGINX installation.\n", + "enum": [ + "OTHER_INSTANCE" + ] + }, + "networkConfig": { + "$ref": "#/components/schemas/OtherNetworkConfig" + }, + "agent": { + "$ref": "#/components/schemas/Agent" + }, + "nginx": { + "$ref": "#/components/schemas/Nginx" + }, + "status": { + "deprecated": true, + "allOf": [ + { + "$ref": "#/components/schemas/ConfigState" + } + ] + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "legacyNginxMetadata": { + "deprecated": true, + "$ref": "#/components/schemas/LegacyNginxMetadata" + }, + "legacySystemMetadata": { + "deprecated": true, + "$ref": "#/components/schemas/LegacySystemMetadata" + } + } + }, + "AWSInstanceCurrentStatus": { + "type": "object", + "description": "Contains the current status of the AWS Instance.", + "required": [ + "type", + "hostname", + "version", + "agent", + "muted" + ], + "properties": { + "hostname": { + "type": "string", + "description": "The hostname of the Instance." + }, + "instanceID": { + "type": "string", + "description": "The ID of the Instance." + }, + "version": { + "type": "string", + "description": "The version of NGINX running on the Instance." 
+ }, + "muted": { + "type": "boolean", + "description": "Indicates the status of notifications and alerts.\n- 'True' means that notifications and alerts are muted.\n- 'False' means that notifications and alerts are active.\n", + "deprecated": true + }, + "type": { + "type": "string", + "description": "AWS_INSTANCE is an Instance hosted in Amazon Web Services (AWS).\n", + "enum": [ + "AWS_INSTANCE" + ] + }, + "networkConfig": { + "$ref": "#/components/schemas/AWSNetworkConfig" + }, + "agent": { + "$ref": "#/components/schemas/Agent" + }, + "templateRef": { + "$ref": "#/components/schemas/TemplateRef" + }, + "nginx": { + "$ref": "#/components/schemas/Nginx" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "legacyNginxMetadata": { + "deprecated": true, + "$ref": "#/components/schemas/LegacyNginxMetadata" + }, + "legacySystemMetadata": { + "deprecated": true, + "$ref": "#/components/schemas/LegacySystemMetadata" + } + } + }, + "AzureInstanceCurrentStatus": { + "type": "object", + "description": "Contains the current status of the Azure Instance.", + "required": [ + "type", + "hostname", + "version", + "agent" + ], + "properties": { + "hostname": { + "type": "string", + "description": "The hostname of the Instance." + }, + "instanceID": { + "type": "string", + "description": "The ID of the Instance." + }, + "version": { + "type": "string", + "description": "The version of NGINX running on the Instance." 
+ }, + "muted": { + "type": "boolean", + "description": "Indicates the mute status of notifications and alerts.\n- 'True' means that notifications and alerts are muted.\n- 'False' means that notifications and alerts are active.\n", + "deprecated": true + }, + "type": { + "type": "string", + "description": "AZURE_INSTANCE is an Instance hosted in Microsoft Azure.", + "enum": [ + "AZURE_INSTANCE" + ] + }, + "networkConfig": { + "$ref": "#/components/schemas/AzureNetworkConfig" + }, + "agent": { + "$ref": "#/components/schemas/Agent" + }, + "templateRef": { + "$ref": "#/components/schemas/TemplateRef" + }, + "nginx": { + "$ref": "#/components/schemas/Nginx" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "legacyNginxMetadata": { + "deprecated": true, + "$ref": "#/components/schemas/LegacyNginxMetadata" + }, + "legacySystemMetadata": { + "deprecated": true, + "$ref": "#/components/schemas/LegacySystemMetadata" + } + } + }, + "VSphereInstanceCurrentStatus": { + "type": "object", + "description": "Reflects the current status of the VSphere Instance.", + "required": [ + "type", + "version", + "hostname", + "agent" + ], + "properties": { + "type": { + "type": "string", + "description": "An Instance that resides on a VSphere host.\nVSPHERE_INSTANCE is an Instance hosted in VSphere.\n", + "enum": [ + "VSPHERE_INSTANCE" + ] + }, + "hostname": { + "type": "string", + "description": "The hostname of the Instance." + }, + "instanceID": { + "type": "string", + "description": "The ID of the Instance." + }, + "version": { + "type": "string", + "description": "The version of NGINX running on the Instance." 
+ }, + "agent": { + "$ref": "#/components/schemas/Agent" + }, + "templateRef": { + "$ref": "#/components/schemas/TemplateRef" + }, + "nginx": { + "$ref": "#/components/schemas/Nginx" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "legacyNginxMetadata": { + "deprecated": true, + "$ref": "#/components/schemas/LegacyNginxMetadata" + }, + "legacySystemMetadata": { + "deprecated": true, + "$ref": "#/components/schemas/LegacySystemMetadata" + } + } + }, + "TemplateRef": { + "type": "object", + "allOf": [ + { + "$ref": "#/components/schemas/ResourceRef" + } + ], + "description": "Reference to an existing Instance Template resource. This field cannot be updated." + }, + "LegacySystemMetadata": { + "type": "object", + "deprecated": true, + "description": "Metadata that describe the operating system attributes and properties of an Instance host system. It is\nintended for internal use only and is subject to change.\n", + "additionalProperties": true + }, + "LegacyNginxMetadata": { + "type": "object", + "deprecated": true, + "description": "Metadata that describe an Instance's NGINX process configuration and properties. It is intended\nfor internal use only and is subject to change.\n", + "additionalProperties": true + }, + "Agent": { + "type": "object", + "description": "The properties of the Controller Agent running on the Instance", + "required": [ + "version" + ], + "properties": { + "version": { + "type": "string", + "description": "The version of Controller Agent that is currently running on the Instance." + }, + "online": { + "type": "boolean", + "description": "The status of Controller Agent that is currently running on the Instance." + }, + "credentials": { + "$ref": "#/components/schemas/AgentCredentials" + } + } + }, + "AgentCredentials": { + "type": "object", + "description": "The credentials of the Controller Agent running on the Instance.", + "properties": { + "hostname": { + "type": "string", + "description": "The hostname of the Agent." 
+ }, + "uuid": { + "type": "string", + "description": "The uuid of the Agent." + } + } + }, + "OtherNetworkConfig": { + "type": "object", + "description": "The network config of a customer deployed Instance.", + "properties": { + "networkInterfaces": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OtherNetworkInterface" + } + } + } + }, + "OtherNetworkInterface": { + "type": "object", + "description": "A network interface for a customer deployed Instance.", + "properties": { + "name": { + "type": "string", + "description": "The name of the network interface attached to the Instance." + }, + "privateDnsName": { + "type": "string", + "description": "The private, internal hostname of the instance, which resolves to the Instance's private IP address.\n" + }, + "privateIP": { + "type": "string", + "description": "The private IP address of the network interface." + }, + "privateIPv6": { + "type": "string", + "description": "The private IPv6 address of the network interface." + }, + "alternateIPList": { + "type": "array", + "items": { + "type": "string" + } + }, + "alternateIPv6List": { + "type": "array", + "items": { + "type": "string" + } + }, + "subnet": { + "type": "object", + "description": "The subnet that contains the interface.", + "properties": { + "cidrIPv4": { + "type": "string", + "description": "The IPv4 CIDR for the subnet." + }, + "cidrIPv6": { + "type": "string", + "description": "The IPv6 CIDR for the subnet." + } + } + } + } + }, + "AWSNetworkConfig": { + "type": "object", + "description": "The network config of an AWS Instance.", + "properties": { + "networkInterfaces": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AWSNetworkInterface" + } + } + } + }, + "AWSNetworkInterface": { + "type": "object", + "description": "A network interface for an AWS Instance.", + "properties": { + "name": { + "type": "string", + "description": "The name of the network interface attached to the Instance." 
+ }, + "privateDnsName": { + "type": "string", + "description": "The private, internal hostname of the instance, which resolves to the Instance's private IP address. \n" + }, + "publicDnsName": { + "type": "string", + "description": "The public hostname of the instance, which resolves to the public IP address of the Instance. \n" + }, + "privateIP": { + "type": "string", + "description": "The private IP address of the network interface." + }, + "publicIP": { + "type": "string", + "description": "The public IP address of the network interface." + }, + "subnet": { + "type": "object", + "description": "The subnet that contains the interface.", + "properties": { + "subnetID": { + "type": "string", + "description": "The ID of the subnet into which the instance was launched." + }, + "cidrIPv4": { + "type": "string", + "description": "The IPv4 CIDR for the subnet." + }, + "cidrIPv6": { + "type": "string", + "description": "The IPv6 CIDR for the subnet." + } + } + } + } + }, + "AzureNetworkConfig": { + "type": "object", + "description": "The network config of an Azure Instance.", + "properties": { + "networkInterfaces": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AzureNetworkInterface" + } + } + } + }, + "AzureNetworkInterface": { + "type": "object", + "description": "A network interface associated with an Azure Instance.", + "properties": { + "name": { + "type": "string", + "description": "The name of the network interface attached to the Instance." + }, + "publicDnsName": { + "type": "string", + "description": "The public hostname of the instance, which resolves to the public IP address of the Instance. \n" + }, + "privateIP": { + "type": "string", + "description": "The private IP address of the network interface." + }, + "publicIP": { + "type": "string", + "description": "The public IP address of the network interface." + }, + "privateIPv6": { + "type": "string", + "description": "The private IPv6 address of the network interface." 
+ }, + "publicIPv6": { + "type": "string", + "description": "The public IPv6 address of the network interface." + }, + "subnet": { + "type": "object", + "description": "The subnet that contains the interface.", + "properties": { + "subnetID": { + "type": "string", + "description": "The ID of the subnet that contains the Instance." + }, + "cidrIPv4": { + "type": "string", + "description": "The IPv4 CIDR for the subnet." + }, + "cidrIPv6": { + "type": "string", + "description": "The IPv6 CIDR for the subnet." + } + } + } + } + }, + "Nginx": { + "type": "object", + "description": "Defines properties and configuration values for Nginx.\n", + "properties": { + "process": { + "$ref": "#/components/schemas/NginxProcess" + }, + "performance": { + "$ref": "#/components/schemas/Performance" + } + } + }, + "NginxProcess": { + "type": "object", + "description": "Defines configuration directives that are defined in the main configuration context.\n", + "properties": { + "user": { + "type": "string", + "description": "Defines user credentials used by worker processes.\n", + "default": "nginx" + }, + "group": { + "type": "string", + "description": "Defines group credentials used by worker processes. This will be ignored if the user\nproperty is not defined.\n", + "default": "nginx" + } + } + }, + "Performance": { + "type": "object", + "description": "Defines performance tuning directives that are defined in the main configuration context.\n", + "properties": { + "workerProcesses": { + "type": "integer", + "description": "Defines the number of worker processes. To set it to the number of available CPU cores\nfor a system, use 0.\n", + "minimum": 0, + "externalDocs": { + "url": "https://nginx.org/en/docs/ngx_core_module.html#worker_processes" + } + }, + "workerPriority": { + "type": "integer", + "description": "Defines the scheduling priority for worker processes like it is done by the nice command:\na negative number means higher priority. 
Allowed range normally varies from -20 to 20.\n", + "externalDocs": { + "url": "https://nginx.org/en/docs/ngx_core_module.html#worker_priority" + } + }, + "workerConnections": { + "type": "integer", + "description": "Sets the maximum number of simultaneous connections that can be opened by a worker process.\nThis number includes all the connections like proxied servers, connection with clients.\n", + "externalDocs": { + "url": "https://nginx.org/en/docs/ngx_core_module.html#worker_connections" + } + }, + "workerRlimitNofile": { + "type": "integer", + "description": "Sets the limit on the maximum number of open files for worker processes.\nUsed to increase the limit without restarting the main process.\n", + "externalDocs": { + "url": "https://nginx.org/en/docs/ngx_core_module.html#worker_rlimit_nofile" + } + }, + "multiAccept": { + "type": "boolean", + "description": "Sets if the worker process will allow more than one connection at a time. If multiAccept is disabled,\nworker process only accepts one connection at a time.\n", + "externalDocs": { + "url": "https://nginx.org/en/docs/ngx_core_module.html#multi_accept" + } + } + } + }, + "SelfLinks": { + "type": "object", + "description": "The SelfLinks object contains a link from the resource to itself.\nThis object is used only in responses.\n", + "properties": { + "rel": { + "type": "string", + "example": "/api/v1/services/environments/prod", + "description": "`rel` contains the complete path fragment of a URI and can be used\nto construct a query to the object.\n" + } + } + }, + "ResourceMeta": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "pattern": "^[^A-Z\\s\\x00-\\x1f\\x60\\x7f\\;\\*\\\"\\[\\]\\{\\}\\\\\\/%\\?:=&\\~\\^|#<>]+$", + "not": { + "type": "string", + "enum": [ + ".", + ".." 
+ ] + }, + "minLength": 1, + "maxLength": 1024, + "example": "resource-name", + "description": "Resource name is a unique identifier for a resource within the context of a namespace.\nResource names must conform to [RFC 1738 Section 2.2](https://www.ietf.org/rfc/rfc1738.txt) and have a valid syntax for email addresses. The following rules are enforced:\n\n- do not utilize URL encoding;\n- do not include spaces;\n- do not use uppercase characters, for example, 'A-Z'; extended character sets are supported;\n- do not use the following characters: `\"`, `*`, `:`, `;`, `/`, `\\`, `%`, `?`, `hash`, `=`, `&`, `|`, `~`, `^`, `{`, `}`, `[`, `]`, `<`, `>`;\n- cannot start or end with an `@` sign;\n- cannot be only `.` or `..`\n\nFor example: For a collection resource located at\n\n`https://controller.example.com/api/v1/services/apps/shopping_@1`\n\nthe resource name is \"shopping_@1\".\n" + }, + "displayName": { + "type": "string", + "example": "My Display Name", + "description": "`displayName` is a user friendly resource name. It can be used to define \na longer, and less constrained, name for a resource.\n\nDisplay names:\n\n- are optional (defaults to an empty string if no value is provided),\n- do not have to be unique, \n- cannot be assigned by the server.\n" + }, + "description": { + "type": "string", + "example": "This is a sample description string. It provides information about the resource.", + "description": "`description` is a free-form text property. You can use it to provide information that helps \nto identify the resource.\n\nDescriptions:\n\n- are optional (defaults to an empty string if no value is provided),\n- do not have to be unique, \n- cannot be assigned by the server.\n" + }, + "kind": { + "type": "string", + "example": "-", + "description": "Kind is a string representation of an API resource's data type.\nIt is assigned by the server and cannot be changed. 
\n\nWhen creating a `kind`, the server uses hyphens to connect word segments; \nsingleton and collection item resources are not pluralized.\n" + }, + "uid": { + "type": "string", + "format": "uuid", + "example": "d290f1ee-6c54-4b01-90e6-d701748f0851", + "description": "Unique Identifier (UID)\n\nUID is a unique identifier in time and space for a resource. \nWhen you create a resource, the server assigns a UID to the resource.\n\nRefer to [IETF RFC 4122](https://tools.ietf.org/html/rfc4122) for more information.\n" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "production_public", + "dev", + "new_app", + "us-west-1", + "emea" + ], + "description": "You can assign `tags` to a resource as a way to help map, scope, \nand organize resources. \n\nThe system uses tag selectors to specify selection criteria that \nmatch resources that have particular tags.\n" + }, + "ref": { + "type": "string", + "example": "/services/environments/prod", + "description": "The `ref` field contains a reference to another NGINX Controller resource.\n" + }, + "links": { + "$ref": "#/components/schemas/SelfLinks" + }, + "createTime": { + "type": "string", + "format": "date-time", + "example": "2019-07-29T09:12:33.001Z", + "description": "A timestamp that represents the server time when the resource was created.\n\nCreate time is not guaranteed to be set in \"happens-before\" order\nacross separate operations.\n\nIn JSON format, `create_time` type is encoded as a string in the\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt).\n\nFor example: 2018-04-01T01:30:15.01Z\n\nCreate Time is assigned by the server and cannot be changed.\n" + }, + "updateTime": { + "type": "string", + "format": "date-time", + "example": "2019-07-29T10:12:33.001Z", + "description": "A timestamp that represents the server time when the resource was last modified.\n\nResources that have never been updated do not have an `update_time` stamp.\n\nThe default value for resources that 
have never been updated is the local \nlanguage-specific equivalent of \"null\".\n\nIn JSON format, `update_time` type is encoded as a string as described in \n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt).\n" + } + } + }, + "ConfigStateTally": { + "type": "object", + "properties": { + "isConfigured": { + "type": "boolean", + "description": "The configuration operation is complete." + }, + "isConfiguring": { + "type": "boolean", + "description": "The configuration of the resource, or of its child(ren), is in process." + }, + "isError": { + "type": "boolean", + "description": "An error occurred while configuring the resource or its child(ren)." + }, + "isDeleting": { + "type": "boolean", + "description": "A delete operation is in progress for the resource or its child(ren)." + }, + "total": { + "type": "integer", + "description": "The total number of resources to which the configuration operation applies." + }, + "configured": { + "type": "integer", + "description": "The number of resources that have a complete and valid configuration." + }, + "configuring": { + "type": "integer", + "description": "The number of resources that are in the process of being configured." + }, + "error": { + "type": "integer", + "description": "The number of resources that have encountered an error during the configuration process." + }, + "deleting": { + "type": "integer", + "description": "The number of resources that are in the process of being deleted." + } + } + }, + "ConfigCondition": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "The condition type." + }, + "message": { + "type": "string", + "description": "A human-readable message that provides additional information about the configuration operation." 
+ } + } + }, + "ConfigState": { + "type": "object", + "description": "A representation of the resource's current configuration state \nthat comprises the status of the resource itself (`selfConfigState`) and any child \nresources (`childrenConfigState`).\n\nThe conditions array provides additional information during configuration changes.\n", + "properties": { + "selfConfigState": { + "$ref": "#/components/schemas/ConfigStateTally" + }, + "childrenConfigState": { + "$ref": "#/components/schemas/ConfigStateTally" + }, + "conditions": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ConfigCondition" + } + } + } + }, + "NamedLinks": { + "allOf": [ + { + "$ref": "#/components/schemas/SelfLinks" + }, + { + "type": "object", + "description": "Contains information about the object being referred to.\n\nThese are generally details -- like the object name and display name --\nthat are useful to a consumer of the API that performs further\nprocessing. \n\nThis object is only present in responses.\n \n", + "properties": { + "name": { + "type": "string", + "example": "production", + "description": "The name of the linked resource.\n" + }, + "displayName": { + "type": "string", + "example": "Production Environment", + "description": "A user friendly resource name." + } + } + } + ] + }, + "ResourceRef": { + "type": "object", + "required": [ + "ref" + ], + "properties": { + "ref": { + "type": "string", + "example": "/services/environments/prod", + "description": "A reference to another NGINX Controller resource.\n" + }, + "links": { + "$ref": "#/components/schemas/NamedLinks" + } + } + }, + "ErrorDetail": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string", + "example": "Error doing : . This can lead to . Try to resolve the issue.", + "description": "A detailed error message returned by the server. 
\n\nThese messages contain the following information, where applicable:\n\n- What happened.\n- Why it happened.\n- What the consequences are (if any).\n- Recommended action to take to resolve the issue.\n" + } + } + }, + "ErrorModel": { + "type": "object", + "required": [ + "message", + "code" + ], + "properties": { + "message": { + "type": "string", + "example": "Error doing .", + "description": "A human-readable message, in English, that describes the error.\n" + }, + "code": { + "type": "integer", + "example": 1234567, + "description": "A numeric error code that can be used to identify errors for support purposes.\n" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ErrorDetail" + } + } + } + }, + "ErrorSetList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ErrorSet" + } + } + } + }, + "ErrorSet": { + "type": "object", + "description": "Defines the set of error messages to be returned for HTTP errors.", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/ErrorSetDesiredState" + }, + "currentStatus": { + "$ref": "#/components/schemas/ErrorSetCurrentStatus" + } + } + }, + "ErrorSetDesiredState": { + "type": "object", + "properties": { + "errorCodes": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ErrorCode" + } + } + } + }, + "ErrorSetCurrentStatus": { + "type": "object", + "properties": { + "errorCodes": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ErrorCode" + } + } + } + }, + "ErrorCode": { + "type": "object", + "required": [ + "code", + "message" + ], + "properties": { + "code": { + "type": "integer", + "minimum": 400, + "maximum": 599, + "example": 404 + }, + "message": { + "type": "string", + "example": "{\"status\":404,\"message\":\"Resource not found\"}" + } + } + }, + 
"PublishedAPIList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/PublishedAPI" + } + } + } + }, + "PublishedAPI": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "currentStatus": { + "$ref": "#/components/schemas/PublishedAPICurrentStatus" + }, + "desiredState": { + "$ref": "#/components/schemas/PublishedAPIDesiredState" + } + } + }, + "PublishedAPICurrentStatus": { + "type": "object", + "properties": { + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "basePath": { + "type": "string", + "default": "/" + }, + "stripWorkloadBasePath": { + "type": "boolean", + "default": true + }, + "componentRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "apiDefinitionVersionRef": { + "description": "Reference to the Version of the API Definition.\n", + "$ref": "#/components/schemas/ResourceRef" + }, + "gatewayRefs": { + "type": "array", + "description": "Reference to the Gateway associated with the Published API.", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "devportalRefs": { + "type": "array", + "description": "Reference to the Dev Portal associated with the Published API.", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + } + }, + "PublishedAPIDesiredState": { + "type": "object", + "required": [ + "apiDefinitionVersionRef", + "gatewayRefs" + ], + "properties": { + "basePath": { + "type": "string", + "default": "/" + }, + "stripWorkloadBasePath": { + "type": "boolean", + "default": true + }, + "apiDefinitionVersionRef": { + "description": "Reference to the Version of the API Definition.\n", + "$ref": "#/components/schemas/ResourceRef" + }, + "gatewayRefs": { + "type": "array", + "description": "Reference to the Gateway associated with the Published API.", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "devportalRefs": { 
+ "type": "array", + "description": "Reference to the Dev Portal associated with the Published API.", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + } + }, + "InstanceTemplate": { + "type": "object", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/InstanceTemplateState" + }, + "currentStatus": { + "$ref": "#/components/schemas/InstanceTemplateState" + } + } + }, + "GetInstanceTemplateResponse": { + "allOf": [ + { + "$ref": "#/components/schemas/InstanceTemplate" + } + ] + }, + "ListInstanceTemplateResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/InstanceTemplate" + } + } + } + }, + "InstanceTemplateState": { + "oneOf": [ + { + "$ref": "#/components/schemas/AWSInstanceTemplate" + }, + { + "$ref": "#/components/schemas/AzureInstanceTemplate" + }, + { + "$ref": "#/components/schemas/VSphereInstanceTemplate" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "AWS_INSTANCE_TEMPLATE": "#/components/schemas/AWSInstanceTemplate", + "AZURE_INSTANCE_TEMPLATE": "#/components/schemas/AzureInstanceTemplate", + "VSPHERE_INSTANCE_TEMPLATE": "#/components/schemas/VSphereInstanceTemplate" + } + } + }, + "AWSInstanceTemplate": { + "type": "object", + "required": [ + "type", + "amiID", + "instanceType", + "subnetID" + ], + "properties": { + "type": { + "type": "string", + "description": "The type of Instance Template.", + "enum": [ + "AWS_INSTANCE_TEMPLATE" + ] + }, + "amiID": { + "type": "string", + "description": "The AWS `amiID` for the image to use when deploying an Instance using the template.\n" + }, + "instanceType": { + "type": "string", + "description": "The machine size.\n" + }, + "subnetID": { + "type": "string", + "description": "The `subnetID` of the AWS subnet where new Instances created using the Instance 
Template should reside.\n\nThe specified subnet must be in the same AWS Virtual Private Cloud (VPC) as the Instance Template's parent Location resource.\n" + }, + "securityGroupIDs": { + "type": "array", + "description": "The list of AWS securityGroupIDs that you want to apply to new Instances. \n\nThe Security GroupIDs must be available in the same AWS region and Virtual Private Cloud (VPC) as the Instance Template's parent Location resource.\n", + "items": { + "type": "string" + } + }, + "publicKey": { + "type": "string", + "description": "Provide the public key that you want to use to authenticate to the EC2 instance that this template will create. \n" + }, + "associatePublicIPAddress": { + "type": "boolean", + "description": "Specify if a public IP address should be assigned to the instance.\n" + } + } + }, + "AzureInstanceTemplate": { + "type": "object", + "required": [ + "type", + "instanceType", + "image", + "networkInterface", + "adminUser", + "publicKey" + ], + "properties": { + "type": { + "type": "string", + "description": "The type of Instance Template.", + "enum": [ + "AZURE_INSTANCE_TEMPLATE" + ] + }, + "image": { + "$ref": "#/components/schemas/AzureImage" + }, + "instanceType": { + "type": "string", + "description": "The virtual machine size and type." + }, + "networkInterface": { + "$ref": "#/components/schemas/AzureNetworkInterface" + }, + "adminUser": { + "type": "string", + "description": "The name of the administration account." + }, + "publicKey": { + "type": "string", + "description": "The Public Key string for the adminUser." 
+ } + } + }, + "AzureImage": { + "type": "object", + "oneOf": [ + { + "$ref": "#/components/schemas/AzureImageID" + }, + { + "$ref": "#/components/schemas/AzureImageReference" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "AZURE_IMAGE_ID": "#/components/schemas/AzureImageID", + "AZURE_IMAGE_REFERENCE": "#/components/schemas/AzureImageReference" + } + } + }, + "AzureImageID": { + "type": "object", + "description": "The Azure resource ID for the image to use when deploying an Instance.", + "required": [ + "type", + "imageID" + ], + "properties": { + "type": { + "type": "string", + "description": "The type of the Azure Image.", + "enum": [ + "AZURE_IMAGE_ID" + ] + }, + "imageID": { + "type": "string", + "description": "The resource ID of the Azure image." + } + } + }, + "AzureImageReference": { + "type": "object", + "description": "The parameters that identify which Azure Marketplace image to use for the Instance.", + "required": [ + "type", + "publisher", + "offer", + "sku" + ], + "properties": { + "type": { + "type": "string", + "description": "The type of the Azure Image.", + "enum": [ + "AZURE_IMAGE_REFERENCE" + ] + }, + "publisher": { + "type": "string", + "description": "The publisher of the Azure Marketplace image." + }, + "offer": { + "type": "string", + "description": "The offer of the Azure Marketplace image." + }, + "sku": { + "type": "string", + "description": "The SKU of the Azure Marketplace image." + }, + "version": { + "type": "string", + "description": "The version of the Azure Marketplace image (default is latest)." 
+ } + } + }, + "AzureNetworkID": { + "type": "object", + "description": "Identifies the existing Azure Network Interface that you want the Instance to use.", + "required": [ + "type", + "nicID" + ], + "properties": { + "type": { + "type": "string", + "description": "The type of the Azure Network Interface.", + "enum": [ + "AZURE_NIC_ID" + ] + }, + "nicID": { + "type": "string", + "description": "The ID of the Azure Network Interface." + } + } + }, + "VSphereInstanceTemplate": { + "type": "object", + "required": [ + "type", + "image", + "computeResource", + "numCPUs", + "memoryMB" + ], + "properties": { + "type": { + "type": "string", + "description": "The type of Instance Template.", + "enum": [ + "VSPHERE_INSTANCE_TEMPLATE" + ] + }, + "image": { + "type": "string", + "description": "The VSphere virtual machine template that you want to deploy an Instance from.\nBe sure to specify the full path to the desired template.\n" + }, + "computeResource": { + "type": "string", + "description": "The name of the VSphere Host, Cluster, or Resource Pool that you want to add the Instance to.\nBe sure to provide the full path to the desired resource.\n" + }, + "datastore": { + "type": "array", + "minItems": 1, + "maxItems": 1, + "items": { + "type": "string" + }, + "description": "A list of VSphere datastores that you want to attach to the Instance.\nBe sure to specify the full path to the desired resource(s).\n> Note: When defined, this setting will override the datastore configured in the virtual machine template.\n" + }, + "network": { + "type": "array", + "minItems": 1, + "maxItems": 1, + "items": { + "type": "string" + }, + "description": "A list of the VSphere networks that you want to attach to the Instance. 
You can connect the Instance to VM Networks, Distributed Virtual Switches, and/or Port Groups.\n> Note: When defined, this setting will override the network configured in the virtual machine template.\n" + }, + "numCPUs": { + "type": "integer", + "description": "The number of vCPUs to configure on the Instance.\n" + }, + "memoryMB": { + "type": "integer", + "description": "The amount of memory (in MB) to configure on the Instance.\n" + } + } + }, + "StrategyDesiredState": { + "type": "object", + "description": "The desired state of the strategy", + "required": [ + "content" + ], + "properties": { + "content": { + "type": "object", + "$ref": "#/components/schemas/StrategyData" + } + } + }, + "StrategyList": { + "type": "object", + "required": [ + "items" + ], + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/StrategyStatus" + } + } + } + }, + "StrategyStatus": { + "type": "object", + "required": [ + "metadata", + "desiredState", + "currentStatus" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/StrategyDesiredState" + }, + "currentStatus": { + "$ref": "#/components/schemas/StrategyCurrentStatus" + } + } + }, + "StrategyCurrentStatus": { + "type": "object", + "description": "Shows the current status of the strategy.", + "required": [ + "state", + "content" + ], + "properties": { + "content": { + "type": "object", + "$ref": "#/components/schemas/StrategyData" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + } + } + }, + "Strategy": { + "type": "object", + "description": "Contains the strategy to upload.", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/StrategyDesiredState" + } + } + }, + "StrategyData": { + "type": "object", + "description": "Strategy Data.", + "required": [ + 
"securityPolicyRef" + ], + "properties": { + "securityPolicyRef": { + "type": "string", + "description": "Reference to the Nginx Application Protection policy used in this strategy.", + "example": "/security/policies/mynappolicy" + } + } + }, + "Integration": { + "type": "object", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/IntegrationState" + }, + "currentStatus": { + "$ref": "#/components/schemas/IntegrationState" + } + }, + "example": { + "metadata": { + "name": "my-aws-integration", + "endpointUri": "https://ec2.us-west-1.amazonaws.com", + "description": "AWS integration for us-west-1", + "displayName": "UsWest1-Integration", + "tags": [ + "us-west-1" + ], + "uid": "619887de-a748-4931-853d-c6b706f95ddf", + "createTime": "2019-09-18T16:42:15.1183523Z", + "updateTime": "2019-09-18T16:42:15.1183523Z" + }, + "desiredState": { + "type": "AWS_INTEGRATION", + "credential": { + "type": "AWS_ACCESS_KEY_CREDENTIAL", + "accessKeyID": "AKIAJ3N2PZUMZE3Y67ZA", + "secretAccessKey": "*********" + } + }, + "currentStatus": { + "type": "AWS_INTEGRATION", + "credential": { + "type": "AWS_ACCESS_KEY_CREDENTIAL", + "accessKeyID": "AKIAJ3N2PZUMZE3Y67ZA", + "secretAccessKey": "*********" + } + } + } + }, + "GetIntegrationResponse": { + "allOf": [ + { + "$ref": "#/components/schemas/Integration" + } + ] + }, + "ListIntegrationResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Integration" + } + } + } + }, + "IntegrationState": { + "oneOf": [ + { + "$ref": "#/components/schemas/AWSIntegration" + } + ], + "discriminator": { + "propertyName": "type" + } + }, + "AWSIntegration": { + "required": [ + "type", + "credential" + ], + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "AWS_INTEGRATION" + ] + }, + "endpointUri": { + "type": 
"string" + }, + "credential": { + "oneOf": [ + { + "$ref": "#/components/schemas/AWSAccessKeyCredential" + } + ], + "discriminator": { + "propertyName": "type" + } + } + } + }, + "AWSAccessKeyCredential": { + "required": [ + "type", + "accessKeyID", + "secretAccessKey" + ], + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "AWS_ACCESS_KEY_CREDENTIAL" + ] + }, + "accessKeyID": { + "type": "string" + }, + "secretAccessKey": { + "type": "string" + } + } + }, + "PolicyDesiredState": { + "type": "object", + "description": "The desired state of the policy", + "required": [ + "content" + ], + "properties": { + "content": { + "type": "object", + "$ref": "#/components/schemas/PolicyData" + } + } + }, + "PolicyList": { + "type": "object", + "required": [ + "items" + ], + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/PolicyStatus" + } + } + } + }, + "PolicyStatus": { + "type": "object", + "required": [ + "metadata", + "desiredState", + "currentStatus" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/PolicyDesiredState" + }, + "currentStatus": { + "$ref": "#/components/schemas/PolicyCurrentStatus" + } + } + }, + "PolicyCurrentStatus": { + "type": "object", + "description": "Shows the current status of the policy.", + "required": [ + "state", + "content" + ], + "properties": { + "content": { + "type": "object", + "$ref": "#/components/schemas/PolicyData" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + } + } + }, + "Policy": { + "type": "object", + "description": "Contains the policy to upload.", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/PolicyDesiredState" + } + } + }, + "PolicyData": { + "type": "object", + "description": "Contains the policy to 
upload.", + "example": { + "policy": { + "name": "mynappolicy", + "template": { + "name": "POLICY_TEMPLATE_NGINX_BASE" + }, + "applicationLanguage": "utf-8", + "enforcementMode": "blocking", + "signatures": [ + { + "signatureId": 123458888, + "enabled": false + }, + { + "signatureId": 200000098, + "enabled": false + }, + { + "signatureId": 200001475, + "enabled": false + }, + { + "signatureId": 200002595, + "enabled": false + } + ], + "bot-defense": { + "settings": { + "isEnabled": false + } + }, + "headers": [ + { + "name": "*", + "type": "wildcard", + "decodeValueAsBase64": "disabled" + }, + { + "name": "*-bin", + "type": "wildcard", + "decodeValueAsBase64": "required" + }, + { + "name": "Referer", + "type": "explicit", + "decodeValueAsBase64": "disabled" + }, + { + "name": "Authorization", + "type": "explicit", + "decodeValueAsBase64": "disabled" + }, + { + "name": "Transfer-Encoding", + "type": "explicit", + "decodeValueAsBase64": "disabled" + } + ], + "cookies": [ + { + "name": "*", + "type": "wildcard", + "decodeValueAsBase64": "disabled" + } + ], + "parameters": [ + { + "name": "*", + "type": "wildcard", + "decodeValueAsBase64": "disabled" + } + ] + } + } + } + }, + "examples": { + "IdentityProviderRequest": { + "value": { + "metadata": { + "name": "resource-name", + "displayName": "My Display Name", + "description": "This is a sample description string. It provides information about the resource.", + "tags": [ + "production_public", + "dev", + "new_app", + "us-west-1", + "emea" + ] + }, + "desiredState": { + "environmentRefs": [ + { + "ref": "/services/environments/dev" + } + ], + "identityProvider": { + "type": "API_KEY" + } + } + } + }, + "IdentityProviderClientListRequest": { + "value": { + "items": [ + { + "metadata": { + "name": "resource-name", + "displayName": "My Display Name", + "description": "This is a sample description string. 
It provides information about the resource.", + "tags": [ + "production_public", + "dev", + "new_app", + "us-west-1", + "emea" + ] + }, + "desiredState": { + "credential": { + "type": "API_KEY", + "apiKey": "ADv2ZheQnLjVx5klhQ39" + } + } + } + ] + } + }, + "IdentityProviderClientRequest": { + "value": { + "metadata": { + "name": "resource-name", + "displayName": "My Display Name", + "description": "This is a sample description string. It provides information about the resource.", + "tags": [ + "production_public", + "dev", + "new_app", + "us-west-1", + "emea" + ] + }, + "desiredState": { + "credential": { + "type": "API_KEY", + "apiKey": "ADv2ZheQnLjVx5klhQ39" + } + } + } + }, + "IdentityProviderClientPatchMetadataRequest": { + "value": { + "metadata": { + "name": "resource-name", + "description": "This is a sample description string. It provides information about the resource." + } + } + }, + "IdentityProviderClientPatchDesiredStateRequest": { + "value": { + "desiredState": { + "credential": { + "type": "API_KEY", + "apiKey": "ADv2ZheQnLjVx5klhQ39" + } + } + } + }, + "AWSInstanceRequest": { + "value": { + "metadata": { + "name": "nginx-dp-1", + "displayName": "nginx-dp-1", + "tags": [ + "prod-1", + "dev-1" + ] + }, + "desiredState": { + "type": "AWS_INSTANCE", + "templateRef": { + "ref": "/infrastructure/locations/aws-east/instance-templates/small-dev-template" + } + } + } + }, + "AzureInstanceRequest": { + "value": { + "metadata": { + "name": "nginx-dp-1", + "displayName": "nginx-dp-1", + "tags": [ + "prod-1", + "dev-1" + ] + }, + "desiredState": { + "type": "AZURE_INSTANCE", + "templateRef": { + "ref": "/infrastructure/locations/azure-westus2/instance-templates/small-dev-template" + } + } + } + }, + "VSphereInstanceRequest": { + "value": { + "metadata": { + "name": "nginx-dp-1", + "displayName": "nginx-dp-1" + }, + "desiredState": { + "type": "VSPHERE_INSTANCE", + "templateRef": { + "ref": "/infrastructure/locations/dc-sea/instance-templates/small-dev-template" 
+ } + } + } + }, + "InstanceUpdateRequest": { + "value": { + "metadata": { + "name": "nginx-dp-1", + "displayName": "nginx-dp-1", + "description": "An example NGINX Instance.", + "tags": [ + "prod-1", + "dev-1" + ] + } + } + }, + "AWSInstance": { + "value": { + "metadata": { + "name": "instance-1", + "displayName": "My Instance", + "tags": [ + "prod-1", + "dev-1" + ], + "links": { + "rel": "/infrastructure/locations/unspecified/instances/instance-1" + }, + "createTime": "2019-07-29T09:12:33.001Z", + "updateTime": "2019-07-29T09:12:33.001Z" + }, + "desiredState": { + "type": "AWS_INSTANCE", + "templateRef": { + "ref": "/infrastructure/locations/aws-uswest-2/instance-templates/my-t2-medium" + } + }, + "currentStatus": { + "type": "AWS_INSTANCE", + "state": { + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + }, + "hostname": "instance-1.mycloud.net", + "version": "1.17.3", + "muted": false, + "networkConfig": { + "networkInterfaces": [ + { + "name": "eth0", + "privateDnsName": "ip-172-16-0-71.us-west-2.compute.internal", + "publicDnsName": "ec2-54-212-110-173.us-west-2.compute.amazonaws.com", + "privateIP": "172.16.0.71", + "publicIP": "54.212.110.173", + "subnet": { + "subnetID": "subnet-055d28be58feb0a7d", + "cidrIPv4": "172.16.0.0/24" + } + } + ] + }, + "agent": { + "credentials": { + "hostname": "instance-1.mycloud.net", + "uuid": "c1088edfd9f35cd38d5b4ce109508fe9" + }, + "version": 2.8 + }, + "legacyNginxMetadata": { + "build": "nginx-plus-r19", + "built_from_source": false, + "last_seen": "2019-11-12T23:47:52.966607Z", + "pid": 2749, + "properties": { + "conf-path": "/etc/nginx/nginx.conf", + "error-log-path": "/var/log/nginx/error.log", + "group": "nginx", + "http-client-body-temp-path": "/var/cache/nginx/client_temp", + "http-fastcgi-temp-path": "/var/cache/nginx/fastcgi_temp", + "http-log-path": 
"/var/log/nginx/access.log", + "http-proxy-temp-path": "/var/cache/nginx/proxy_temp", + "http-scgi-temp-path": "/var/cache/nginx/scgi_temp", + "http-uwsgi-temp-path": "/var/cache/nginx/uwsgi_temp", + "loadable_modules": [], + "lock-path": "/var/run/nginx.lock", + "modules-path": "/usr/lib/nginx/modules", + "packages": { + "nginx-plus": "19-1~bionic" + }, + "path": { + "bin": "/usr/sbin/nginx", + "conf": "/etc/nginx/nginx.conf" + }, + "pid-path": "/var/run/nginx.pid", + "prefix": "/etc/nginx", + "sbin-path": "/usr/sbin/nginx", + "status_module_enabled": false, + "stub_status_enabled": false, + "stub_status_url": null, + "user": "nginx", + "with-cc-opt": "-g -O2 -fdebug-prefix-map=/data/builder/debuild/nginx-plus-1.17.3/debian/debuild-base/nginx-plus-1.17.3=. -fstack-protector-strong -Wformat -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -fPIC", + "with-compat": true, + "with-file-aio": true, + "with-http_addition_module": true, + "with-http_auth_jwt_module": true, + "with-http_auth_request_module": true, + "with-http_dav_module": true, + "with-http_f4f_module": true, + "with-http_flv_module": true, + "with-http_gunzip_module": true, + "with-http_gzip_static_module": true, + "with-http_hls_module": true, + "with-http_mp4_module": true, + "with-http_random_index_module": true, + "with-http_realip_module": true, + "with-http_secure_link_module": true, + "with-http_session_log_module": true, + "with-http_slice_module": true, + "with-http_ssl_module": true, + "with-http_stub_status_module": true, + "with-http_sub_module": true, + "with-http_v2_module": true, + "with-ld-opt": "-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now -Wl,--as-needed -pie", + "with-mail": true, + "with-mail_ssl_module": true, + "with-stream": true, + "with-stream_realip_module": true, + "with-stream_ssl_module": true, + "with-stream_ssl_preread_module": true, + "with-threads": true + }, + "running": true, + "ssl": { + "built": [ + "OpenSSL", + "1.1.1", + "11 Sep 2018" + ], + "run": [ + "OpenSSL", 
+ "1.1.1", + "11 Sep 2018" + ] + }, + "start_time": 1573580604000 + }, + "legacySystemMetadata": { + "boot": 1573580280000, + "disk_partitions": [ + { + "device": "/dev/sda2", + "fstype": "ext4", + "mountpoint": "/" + }, + { + "device": "/dev/sda1", + "fstype": "ext3", + "mountpoint": "/boot" + } + ], + "network": { + "default": "lo", + "interfaces": { + "ipv4": { + "address": "127.0.0.1", + "netmask": "255.0.0.0", + "prefixlen": 8 + }, + "ipv6": { + "address": "::1", + "netmask": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + "prefixlen": 128 + }, + "mac": "00:00:00:00:00:00", + "name": "lo" + } + }, + "os-type": "linux", + "processor": { + "architecture": "x86_64", + "cache": { + "L1d": "32K", + "L1i": "32K", + "L2": "1024K", + "L3": "25344K" + }, + "cores": 1, + "cpus": 4, + "hypervisor": "VMware", + "mhz": 2300, + "model": "Intel(R) Xeon(R) Gold 6140 CPU @ 2.30GHz", + "virtualization": "full" + }, + "release": { + "codename": "bionic", + "id": "ubuntu", + "name": "Ubuntu", + "version": "18.04.2 LTS (Bionic Beaver)", + "version_id": 18.04 + } + } + } + } + }, + "AzureInstance": { + "value": { + "metadata": { + "name": "instance-1", + "createTime": "2020-10-27T22:14:36.994172Z", + "description": "My Azure Instance", + "displayName": "Instance-1", + "tags": [ + "prod-1", + "dev-1" + ], + "kind": "instance", + "links": { + "rel": "/api/v1/infrastructure/locations/azure-westus2/instances/instance-1" + }, + "ref": "/infrastructure/locations/azure-westus2/instances/instance-1", + "uid": "4ed722ec-1bc0-47a1-9772-87718fa9ddb8", + "updateTime": "2020-10-27T22:14:36.994172Z" + }, + "desiredState": { + "nginx": { + "process": { + "group": "test", + "user": "testuser" + } + }, + "templateRef": { + "ref": "/infrastructure/locations/azure-westus2/instance-templates/azure-standard-ds1v2" + }, + "type": "AZURE_INSTANCE" + }, + "currentStatus": { + "networkConfig": { + "networkInterfaces": [ + { + "name": "my-nic-1", + "publicDnsName": "myapp.westus2.cloudapp.azure.com", + 
"privateIP": "10.0.1.4", + "publicIP": "52.229.16.198", + "subnet": { + "subnetID": "subnet-test", + "cidrIPv4": "10.0.1.0/24" + } + } + ] + }, + "agent": { + "credentials": { + "hostname": "instance-1.mycloud.net", + "uuid": "c1088edfd9f35cd38d5b4ce109508fe9" + }, + "version": "3.12.5" + }, + "hostname": "instance-1", + "instanceID": "ce266e67-42ec-41a0-b8f4-f4cd0be01828", + "legacyNginxMetadata": { + "build": "nginx-plus-r22", + "built_from_source": true, + "id": 6, + "last_seen": "2020-10-27T22:30:34.376659Z", + "pid": 1138, + "properties": { + "conf-path": "/etc/nginx/nginx.conf", + "error-log-path": "/var/log/nginx/error.log", + "group": "nginx", + "http-client-body-temp-path": "/var/cache/nginx/client_temp", + "http-fastcgi-temp-path": "/var/cache/nginx/fastcgi_temp", + "http-log-path": "/var/log/nginx/access.log", + "http-proxy-temp-path": "/var/cache/nginx/proxy_temp", + "http-scgi-temp-path": "/var/cache/nginx/scgi_temp", + "http-uwsgi-temp-path": "/var/cache/nginx/uwsgi_temp", + "loadable_modules": [ + "ngx_http_f5_metrics_module-debug.so", + "ngx_http_f5_metrics_module.so" + ], + "lock-path": "/var/run/nginx.lock", + "modules-path": "/usr/lib/nginx/modules", + "packages": { + "nginx-plus": "nginx-plus-r22" + }, + "path": { + "bin": "/usr/sbin/nginx", + "conf": "/etc/nginx/nginx.conf" + }, + "pid-path": "/var/run/nginx.pid", + "prefix": "/etc/nginx", + "sbin-path": "/usr/sbin/nginx", + "status_module_enabled": false, + "stub_status_enabled": false, + "stub_status_url": "", + "user": "nginx", + "with-compat": true, + "with-file-aio": true, + "with-http_addition_module": true, + "with-http_auth_jwt_module": true, + "with-http_auth_request_module": true, + "with-http_dav_module": true, + "with-http_f4f_module": true, + "with-http_flv_module": true, + "with-http_gunzip_module": true, + "with-http_gzip_static_module": true, + "with-http_hls_module": true, + "with-http_mp4_module": true, + "with-http_random_index_module": true, + "with-http_realip_module": 
true, + "with-http_secure_link_module": true, + "with-http_session_log_module": true, + "with-http_slice_module": true, + "with-http_ssl_module": true, + "with-http_stub_status_module": true, + "with-http_sub_module": true, + "with-http_v2_module": true, + "with-ld-opt": "'-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now -Wl,--as-needed -pie'", + "with-mail": true, + "with-mail_ssl_module": true, + "with-stream": true, + "with-stream_realip_module": true, + "with-stream_ssl_module": true, + "with-stream_ssl_preread_module": true, + "with-threads": true + }, + "running": true, + "ssl": { + "built": [ + "OpenSSL", + "1.1.1", + " 11 Sep 2018" + ], + "run": [ + "OpenSSL", + "1.1.1", + " 11 Sep 2018" + ] + }, + "start_time": 1603836993, + "version": "1.19.0" + }, + "legacySystemMetadata": { + "boot": 1603836911000, + "disk_partitions": [ + { + "device": "/dev/sda1", + "fstype": "ext4", + "mountpoint": "/" + }, + { + "device": "/dev/sda15", + "fstype": "vfat", + "mountpoint": "/boot/efi" + }, + { + "device": "/dev/sdb1", + "fstype": "ext4", + "mountpoint": "/mnt" + } + ], + "id": 5, + "network": { + "default": "eth0", + "interfaces": [ + { + "ipv4": { + "address": "127.0.0.1", + "netmask": "255.0.0.0", + "prefixlen": 8 + }, + "ipv6": { + "address": "::1", + "netmask": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + "prefixlen": 128 + }, + "mac": "00:00:00:00:00:00", + "name": "lo" + }, + { + "ipv4": { + "address": "10.0.1.4", + "netmask": "255.255.255.0", + "prefixlen": 24 + }, + "ipv6": { + "address": "fe80::20d:3aff:fec5:3f80", + "netmask": "ffff:ffff:ffff:ffff::", + "prefixlen": 64 + }, + "mac": "00:0d:3a:c5:3f:80", + "name": "eth0" + } + ] + }, + "os-type": "linux", + "processor": { + "architecture": "x86_64", + "cache": { + "L1d": "32K", + "L1i": "32K", + "L2": "1024K", + "L3": "36608K" + }, + "cores": "1", + "cpus": "1", + "hypervisor": "Microsoft", + "mhz": "2095.191", + "model": "Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz", + "virtualization": "full" + }, + 
"release": { + "codename": "bionic", + "id": "ubuntu", + "name": "Ubuntu", + "version": "18.04.5 LTS (Bionic Beaver)", + "version_id": "18.04" + } + }, + "muted": false, + "nginx": { + "process": { + "group": "test", + "user": "testuser" + } + }, + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + }, + "templateRef": { + "ref": "/infrastructure/locations/azure-westus2/instance-templates/azure-standard-ds1v2" + }, + "type": "AZURE_INSTANCE", + "version": "1.19.0" + } + } + }, + "OtherInstance": { + "value": { + "metadata": { + "name": "test_instance_1", + "displayName": "Test Instance 1", + "links": { + "rel": "/api/v1/infrastructure/locations/unspecified/instances/test_instance_1" + }, + "createTime": "2020-05-20T20:02:18.107875Z", + "updateTime": "2020-05-20T20:02:18.107875Z" + }, + "desiredState": { + "type": "OTHER_INSTANCE" + }, + "currentStatus": { + "type": "OTHER_INSTANCE", + "state": { + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + }, + "status": { + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + }, + "agent": { + "credentials": { + "hostname": "instance-1.mycloud.net", + "uuid": "c1088edfd9f35cd38d5b4ce109508fe9" + }, + "version": "999.0.0-1" + }, + "hostname": "test-fab4edf8-data-1.test", + "legacyNginxMetadata": { + "build": "nginx-plus-r21", + "built_from_source": false, + "id": 2, + "last_seen": "2020-05-20T20:40:21.146894Z", + "pid": 2995, + "properties": { + "conf-path": "/etc/nginx/nginx.conf", + "error-log-path": "/var/log/nginx/error.log", + "group": 
"nginx", + "http-client-body-temp-path": "/var/cache/nginx/client_temp", + "http-fastcgi-temp-path": "/var/cache/nginx/fastcgi_temp", + "http-log-path": "/var/log/nginx/access.log", + "http-proxy-temp-path": "/var/cache/nginx/proxy_temp", + "http-scgi-temp-path": "/var/cache/nginx/scgi_temp", + "http-uwsgi-temp-path": "/var/cache/nginx/uwsgi_temp", + "loadable_modules": [ + "ngx_http_f5_metrics_module-debug.so", + "ngx_http_f5_metrics_module.so" + ], + "lock-path": "/var/run/nginx.lock", + "modules-path": "/usr/lib/nginx/modules", + "packages": { + "nginx-plus": "21-1~bionic" + }, + "path": { + "bin": "/usr/sbin/nginx", + "conf": "/etc/nginx/nginx.conf" + }, + "pid-path": "/var/run/nginx.pid", + "prefix": "/etc/nginx", + "sbin-path": "/usr/sbin/nginx", + "status_module_enabled": false, + "stub_status_enabled": false, + "stub_status_url": null, + "user": "nginx", + "with-cc-opt": "'-g -O2 -fdebug-prefix-map=/data/builder/debuild/nginx-plus-1.17.9/debian/debuild-base/nginx-plus-1.17.9=. -fstack-protector-strong -Wformat -Werror=format-security,-D_FORTIFY_SOURCE=2 -fPIC'", + "with-compat": true, + "with-file-aio": true, + "with-http_addition_module": true, + "with-http_auth_jwt_module": true, + "with-http_auth_request_module": true, + "with-http_dav_module": true, + "with-http_f4f_module": true, + "with-http_flv_module": true, + "with-http_gunzip_module": true, + "with-http_gzip_static_module": true, + "with-http_hls_module": true, + "with-http_mp4_module": true, + "with-http_random_index_module": true, + "with-http_realip_module": true, + "with-http_secure_link_module": true, + "with-http_session_log_module": true, + "with-http_slice_module": true, + "with-http_ssl_module": true, + "with-http_stub_status_module": true, + "with-http_sub_module": true, + "with-http_v2_module": true, + "with-ld-opt": "'-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now -Wl,--as-needed -pie'", + "with-mail": true, + "with-mail_ssl_module": true, + "with-stream": true, + 
"with-stream_realip_module": true, + "with-stream_ssl_module": true, + "with-stream_ssl_preread_module": true, + "with-threads": true + }, + "running": true, + "ssl": { + "built": [ + "OpenSSL", + "1.1.1", + "11 Sep 2018" + ], + "run": [ + "OpenSSL", + "1.1.1", + "11 Sep 2018" + ] + }, + "start_time": 1590004864000 + }, + "legacySystemMetadata": { + "boot": 1590004492000, + "disk_partitions": [ + { + "device": "/dev/sda2", + "fstype": "ext4", + "mountpoint": "/" + }, + { + "device": "/dev/sda1", + "fstype": "ext3", + "mountpoint": "/boot" + } + ], + "id": 1, + "network": { + "default": "lo", + "interfaces": [ + { + "ipv4": { + "address": "127.0.0.1", + "netmask": "255.0.0.0", + "prefixlen": 8 + }, + "ipv6": { + "address": "::1", + "netmask": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + "prefixlen": 128 + }, + "mac": "00:00:00:00:00:00", + "name": "lo" + }, + { + "ipv4": { + "address": "192.0.10.1", + "netmask": "255.255.255.0", + "prefixlen": 24 + }, + "mac": "02:42:98:fb:40:48", + "name": "docker0" + }, + { + "ipv4": { + "address": "10.149.41.181", + "netmask": "255.255.240.0", + "prefixlen": 20 + }, + "ipv6": { + "address": "fe80::250:56ff:fe98:e2f1", + "netmask": "ffff:ffff:ffff:ffff::", + "prefixlen": 64 + }, + "mac": "00:50:56:98:e2:f1", + "name": "ens32" + } + ] + }, + "os-type": "linux", + "processor": { + "architecture": "x86_64", + "cache": { + "L1d": "32K", + "L1i": "32K", + "L2": "1024K", + "L3": "25344K" + }, + "cores": "1", + "cpus": "4", + "hypervisor": "VMware", + "mhz": "2300.000", + "model": "Intel(R) Xeon(R) Gold 6140 CPU @ 2.30GHz", + "virtualization": "full" + }, + "release": { + "codename": "bionic", + "id": "ubuntu", + "name": "Ubuntu", + "version": "18.04.2 LTS (Bionic Beaver)", + "version_id": "18.04" + } + }, + "muted": false, + "version": "1.17.9" + } + } + }, + "ListInstanceResponse": { + "value": { + "items": [ + { + "currentStatus": { + "agent": { + "version": "3.7.44" + }, + "hostname": "i-563457274582", + "legacyNginxMetadata": { + 
"build": "nginx-plus-r22", + "built_from_source": false, + "id": 2, + "last_seen": "2020-07-08T17:24:07.869745Z", + "pid": 3104, + "properties": { + "conf-path": "/etc/nginx/nginx.conf", + "error-log-path": "/var/log/nginx/error.log", + "group": "nginx", + "http-client-body-temp-path": "/var/cache/nginx/client_temp", + "http-fastcgi-temp-path": "/var/cache/nginx/fastcgi_temp", + "http-log-path": "/var/log/nginx/access.log", + "http-proxy-temp-path": "/var/cache/nginx/proxy_temp", + "http-scgi-temp-path": "/var/cache/nginx/scgi_temp", + "http-uwsgi-temp-path": "/var/cache/nginx/uwsgi_temp", + "loadable_modules": [ + "ngx_http_f5_metrics_module-debug.so", + "ngx_http_f5_metrics_module.so" + ], + "lock-path": "/var/run/nginx.lock", + "modules-path": "/usr/lib/nginx/modules", + "packages": {}, + "path": { + "bin": "/usr/sbin/nginx", + "conf": "/etc/nginx/nginx.conf" + }, + "pid-path": "/var/run/nginx.pid", + "prefix": "/etc/nginx", + "sbin-path": "/usr/sbin/nginx", + "status_module_enabled": false, + "stub_status_enabled": false, + "stub_status_url": "", + "user": "nginx", + "with-compat": true, + "with-file-aio": true, + "with-http_addition_module": true, + "with-http_auth_jwt_module": true, + "with-http_auth_request_module": true, + "with-http_dav_module": true, + "with-http_f4f_module": true, + "with-http_flv_module": true, + "with-http_gunzip_module": true, + "with-http_gzip_static_module": true, + "with-http_hls_module": true, + "with-http_mp4_module": true, + "with-http_random_index_module": true, + "with-http_realip_module": true, + "with-http_secure_link_module": true, + "with-http_session_log_module": true, + "with-http_slice_module": true, + "with-http_ssl_module": true, + "with-http_stub_status_module": true, + "with-http_sub_module": true, + "with-http_v2_module": true, + "with-ld-opt": "'-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now -Wl,--as-needed -pie'", + "with-mail": true, + "with-mail_ssl_module": true, + "with-stream": true, + 
"with-stream_realip_module": true, + "with-stream_ssl_module": true, + "with-stream_ssl_preread_module": true, + "with-threads": true + }, + "running": true, + "ssl": { + "built": [ + "OpenSSL", + "1.1.1", + "", + "11", + "Sep", + "2018" + ], + "run": [ + "OpenSSL", + "1.1.1", + "", + "11", + "Sep", + "2018" + ] + }, + "start_time": 0 + }, + "legacySystemMetadata": { + "boot": 1594225800000, + "disk_partitions": [ + { + "device": "/dev/sda2", + "fstype": "ext4", + "mountpoint": "/" + }, + { + "device": "/dev/sda1", + "fstype": "ext3", + "mountpoint": "/boot" + } + ], + "id": 1, + "network": { + "default": "", + "interfaces": [ + { + "ipv4": { + "address": "127.0.0.1", + "netmask": "255.0.0.0", + "prefixlen": 8 + }, + "ipv6": { + "address": "::1", + "netmask": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + "prefixlen": 128 + }, + "mac": "00:00:00:00:00:00", + "name": "lo" + }, + { + "ipv4": { + "address": "10.149.41.97", + "netmask": "255.255.240.0", + "prefixlen": 20 + }, + "ipv6": { + "address": "fe80::250:56ff:fe98:b512", + "netmask": "ffff:ffff:ffff:ffff::", + "prefixlen": 64 + }, + "mac": "00:50:56:98:b5:12", + "name": "ens32" + }, + { + "ipv4": { + "address": "192.0.10.1", + "netmask": "255.255.255.0", + "prefixlen": 24 + }, + "mac": "02:42:24:f1:ae:8a", + "name": "docker0" + } + ] + }, + "os-type": "linux", + "processor": { + "architecture": "x86_64", + "cache": { + "L1d": "32K", + "L1i": "32K", + "L2": "1024K", + "L3": "25344K" + }, + "cores": "1", + "cpus": "4", + "hypervisor": "VMware", + "mhz": "2300.000", + "model": "Intel(R) Xeon(R) Gold 6140 CPU @ 2.30GHz", + "virtualization": "full" + }, + "release": { + "codename": "bionic", + "id": "ubuntu", + "name": "Ubuntu", + "version": "18.04.2 LTS (Bionic Beaver)", + "version_id": "18.04" + } + }, + "muted": false, + "state": { + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + 
"total": 1 + } + }, + "status": { + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + }, + "type": "OTHER_INSTANCE", + "version": "1.19.0" + }, + "desiredState": { + "type": "OTHER_INSTANCE" + }, + "metadata": { + "createTime": "2020-07-08T16:42:07.97301Z", + "displayName": "Test Instance 1", + "kind": "instance", + "links": { + "rel": "/api/v1/infrastructure/locations/unspecified/instances/instance-1" + }, + "name": "instance-1", + "ref": "/infrastructure/locations/unspecified/instances/instance-1", + "uid": "ec8d8dac-10b6-4195-943b-1a5d65dd131c", + "updateTime": "2020-07-08T16:42:07.97301Z" + } + } + ] + } + }, + "InstanceGroupRequest": { + "value": { + "metadata": { + "name": "k8s-nginx-deploy", + "displayName": "K8S NGINX+ deployment", + "description": "k8s-nginx-deploy" + }, + "desiredState": {} + } + }, + "GetInstanceGroupResponse": { + "value": { + "metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "K8S NGINX+ deployment", + "displayName": "k8s-nginx-deploy", + "kind": "instance-group", + "links": { + "rel": "/api/v1/infrastructure/instance-groups/k8s-nginx-deploy" + }, + "name": "k8s-nginx-deploy", + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + }, + "desiredState": {}, + "currentStatus": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/eks-cluster/instances/30dc361a3729" + }, + { + "ref": "/infrastructure/locations/eks-cluster/instances/01a9eead50e5" + } + ], + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + } + } + }, + "ListInstanceGroupsResponse": { + "value": { + "items": [ + { + "currentStatus": { + "instanceRefs": [], + 
"state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + }, + "desiredState": {}, + "metadata": { + "createTime": "2020-05-13T09:29:49.464273Z", + "description": "us-west-2 autoscale group", + "displayName": "aws-autoscale-group", + "kind": "instance-group", + "links": { + "rel": "/api/v1/infrastructure/instance-groups/amz-us-west-2-as-group" + }, + "name": "amz-us-west-2-as-group", + "uid": "802ef1f8-9105-474a-b9a7-599837efd6b4", + "updateTime": "2020-05-13T09:29:49.464273Z" + } + }, + { + "currentStatus": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/eks-cluster/instances/30dc361a3729" + }, + { + "ref": "/infrastructure/locations/eks-cluster/instances/01a9eead50e5" + } + ], + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + }, + "desiredState": {}, + "metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "K8S NGINX+ deployment", + "displayName": "k8s-nginx-deploy", + "kind": "instance-group", + "links": { + "rel": "/api/v1/infrastructure/instance-groups/k8s-nginx-deploy" + }, + "name": "k8s-nginx-deploy", + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + } + } + ] + } + }, + "ComponentRequest": { + "value": { + "metadata": { + "name": "resource-name", + "displayName": "My Display Name", + "description": "This is a sample description string. 
It provides information about the resource.", + "tags": [ + "production_public", + "dev", + "new_app", + "us-west-1", + "emea" + ] + }, + "desiredState": { + "ingress": { + "uris": { + "/api/golf/": { + "matchMethod": "PREFIX" + } + }, + "gatewayRefs": [ + { + "ref": "/services/environments/dev/gateways/sportsgw" + } + ] + }, + "publishedApiRefs": [ + { + "ref": "/services/environments/dev/apps/sports/published-apis/golf-pub" + } + ], + "backend": { + "workloadGroups": { + "serverGrp1": { + "uris": { + "{{httpWorkloadInstance1}}": {} + } + } + }, + "monitoring": {} + }, + "security": { + "rateLimit": { + "defaultLimit": { + "rate": 1, + "rateMeasure": "SECONDS" + } + } + }, + "caching": { + "splitConfig": { + "key": "${remote_addr}${http_user_agent}${date_gmt}", + "criteriaType": "PERCENTAGE" + }, + "diskStores": [ + { + "path": "/tmp/hdd1", + "maxSize": "5G", + "minFree": "10M", + "inMemoryStoreSize": "100M", + "tempPath": "ENABLED", + "inactiveTime": "2m", + "percentCriteria": "100%", + "directoryLevel": { + "first": 1, + "mid": 2, + "last": 1 + }, + "trimPolicy": { + "maxFiles": 150, + "frequency": "100m", + "durationThreshold": "30m" + }, + "loaderPolicy": { + "maxFiles": 150, + "frequency": "100m", + "durationThreshold": "30m" + }, + "purgerPolicy": { + "maxFiles": 150, + "frequency": "100m", + "durationThreshold": "30m" + } + } + ] + } + } + } + }, + "OtherLocationRequest": { + "value": { + "metadata": { + "name": "my-other-location", + "description": "Other Location for managing instances", + "displayName": "OtherLocation-1", + "tags": [ + "dev", + "prod" + ] + }, + "desiredState": { + "type": "OTHER_LOCATION" + } + } + }, + "AWSLocationRequest": { + "value": { + "metadata": { + "name": "my-aws-location", + "description": "AWS Location for us-east-1", + "displayName": "us-east-1-location", + "tags": [ + "dev", + "prod" + ] + }, + "desiredState": { + "type": "AWS_LOCATION", + "vpcID": "vpc-1234", + "region": "us-east-1", + "integrationRef": { + "ref": 
"/api/v1/platform/integrations/my-aws-integration" + } + } + } + }, + "AzureLocationRequest": { + "value": { + "metadata": { + "name": "my-azure-location", + "description": "Azure Location for West US 2", + "displayName": "westus2-location", + "tags": [ + "dev", + "prod" + ] + }, + "desiredState": { + "type": "AZURE_LOCATION", + "resourceGroup": "myResourceGroup", + "subscriptionID": "mySubscriptionID", + "region": "westus2", + "integrationRef": { + "ref": "/api/v1/platform/integrations/my-azure-integration" + } + } + } + }, + "OtherLocationResponse": { + "value": { + "currentStatus": { + "type": "OTHER_LOCATION" + }, + "desiredState": { + "type": "OTHER_LOCATION" + }, + "metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "Other Location for managing instances", + "displayName": "OtherLocation-1", + "kind": "location", + "links": { + "rel": "/api/v1/infrastructure/locations/my-other-location" + }, + "name": "my-other-location", + "tags": [ + "dev", + "prod" + ], + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + } + } + }, + "AWSLocationResponse": { + "value": { + "currentStatus": { + "integrationRef": { + "ref": "/api/v1/platform/integrations/my-aws-integration" + }, + "region": "us-east-1", + "type": "AWS_LOCATION", + "vpcID": "vpc-1234" + }, + "desiredState": { + "integrationRef": { + "ref": "/api/v1/platform/integrations/my-aws-integration" + }, + "region": "us-east-1", + "type": "AWS_LOCATION", + "vpcID": "vpc-1234" + }, + "metadata": { + "createTime": "2020-05-13T16:57:02.931198Z", + "description": "AWS Location for us-east-1", + "displayName": "us-east-1-location", + "kind": "location", + "links": { + "rel": "/api/v1/infrastructure/locations/my-aws-location" + }, + "name": "my-aws-location", + "tags": [ + "dev", + "prod" + ], + "uid": "62f72025-59a1-4c11-8cca-798b5e12efb8", + "updateTime": "2020-05-13T16:57:02.931198Z" + } + } + }, + "AzureLocationResponse": { + "value": { + 
"currentStatus": { + "integrationRef": { + "ref": "/api/v1/platform/integrations/my-azure-integration" + }, + "type": "AZURE_LOCATION", + "subscriptionID": "mySubscriptionID", + "region": "westus2" + }, + "desiredState": { + "integrationRef": { + "ref": "/api/v1/platform/integrations/my-azure-integration" + }, + "type": "AZURE_LOCATION", + "subscriptionID": "mySubscriptionID", + "region": "westus2" + }, + "metadata": { + "createTime": "2020-05-13T16:57:02.931198Z", + "description": "Azure Location for West US 2", + "displayName": "westus2-location", + "kind": "location", + "links": { + "rel": "/api/v1/infrastructure/locations/my-azure-location" + }, + "name": "my-azure-location", + "tags": [ + "dev", + "prod" + ], + "uid": "62f72025-59a1-4c11-8cca-798b5e12efb8", + "updateTime": "2020-05-13T16:57:02.931198Z" + } + } + }, + "ListLocationResponse": { + "value": { + "items": [ + { + "currentStatus": { + "type": "OTHER_LOCATION" + }, + "desiredState": { + "type": "OTHER_LOCATION" + }, + "metadata": { + "createTime": "2020-05-13T09:29:49.464273Z", + "description": "Location for instances where location has not been specified", + "displayName": "Unspecified (default)", + "kind": "location", + "links": { + "rel": "/api/v1/infrastructure/locations/unspecified" + }, + "name": "unspecified", + "tags": [ + "default" + ], + "uid": "802ef1f8-9105-474a-b9a7-599837efd6b4", + "updateTime": "2020-05-13T09:29:49.464273Z" + } + }, + { + "currentStatus": { + "type": "OTHER_LOCATION" + }, + "desiredState": { + "type": "OTHER_LOCATION" + }, + "metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "Other Location for managing instances", + "displayName": "OtherLocation-1", + "kind": "location", + "links": { + "rel": "/api/v1/infrastructure/locations/my-other-location" + }, + "name": "my-other-location", + "tags": [ + "dev", + "prod" + ], + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + } + } + ] + } + }, + 
"AWSRequest": { + "value": { + "metadata": { + "name": "my-instance-template", + "description": "AWS Instance Template for T2 large", + "displayName": "T2large-InstanceTemplate", + "tags": [ + "dev", + "prod" + ] + }, + "desiredState": { + "type": "AWS_INSTANCE_TEMPLATE", + "amiID": "ami-a0cfeed8", + "instanceType": "t2.large", + "subnetID": "subnet-12345678", + "securityGroupIDs": [ + "sg-12345678", + "sg-98765432" + ], + "publicKey": "my-public-key", + "associatePublicIPAddress": true + } + } + }, + "AzureRequestWithMarketplaceImageAndUsingExistingNic": { + "value": { + "metadata": { + "name": "my-azure-template-for-standard-A1", + "description": "Azure Instance Template for Standard A1", + "displayName": "Standard_A1-instance-template", + "tags": [ + "dev", + "prod" + ] + }, + "desiredState": { + "type": "AZURE_INSTANCE_TEMPLATE", + "image": { + "type": "AZURE_IMAGE_REFERENCE", + "publisher": "nginxinc", + "offer": "nginx-plus-v1", + "sku": "nginx-plus-ub1804", + "version": "latest" + }, + "instanceType": "Standard_A1", + "adminUser": "azureuser", + "publicKey": "ssh-rsa my-public-key-string", + "networkInterface": { + "type": "AZURE_NIC_ID", + "nicID": "/subscriptions/mySubscriptionID/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/myNicID" + } + } + } + }, + "AzureRequestWithCustomImageAndCreatingNewNicAndPublicIP": { + "value": { + "metadata": { + "name": "my-azure-template-for-standard-DS1_v2", + "description": "Azure Instance Template for Standard DS1v2", + "displayName": "DS1_v2-instance-template", + "tags": [ + "dev", + "prod" + ] + }, + "desiredState": { + "type": "AZURE_INSTANCE_TEMPLATE", + "image": { + "type": "AZURE_IMAGE_ID", + "imageID": "/subscriptions/mySubscriptionID/resourceGroups/myResourceGroup/providers/Microsoft.Compute/images/myCustomImageID" + }, + "instanceType": "Standard_DS1_v2", + "adminUser": "azureuser", + "publicKey": "ssh-rsa my-public-key-string", + "networkInterface": { + "type": "AZURE_NIC_CONFIG", 
+ "virtualNetwork": "my-virtual-network", + "subnet": "my-subnet", + "securityGroup": "my-network-sg", + "publicIp": true + } + } + } + }, + "AWSResponse": { + "value": { + "currentStatus": { + "amiID": "ami-a0cfeed8", + "associatePublicIPAddress": true, + "instanceType": "t2.large", + "publicKey": "my-public-key", + "securityGroupIDs": [ + "sg-12345678", + "sg-98765432" + ], + "subnetID": "subnet-12345678", + "type": "AWS_INSTANCE_TEMPLATE" + }, + "desiredState": { + "amiID": "ami-a0cfeed8", + "associatePublicIPAddress": true, + "instanceType": "t2.large", + "publicKey": "my-public-key", + "securityGroupIDs": [ + "sg-12345678", + "sg-98765432" + ], + "subnetID": "subnet-12345678", + "type": "AWS_INSTANCE_TEMPLATE" + }, + "metadata": { + "createTime": "2020-04-17T02:18:44.394232Z", + "description": "AWS Instance Template for T2 large", + "displayName": "T2large-InstanceTemplate", + "kind": "instance-template", + "links": { + "rel": "/api/v1/infrastructure/locations/location-1/instance-templates/my-instance-template" + }, + "name": "my-instance-template", + "uid": "4157d480-cd0e-40a0-8ba8-d0d5ce17c5d6", + "updateTime": "2020-04-17T02:18:44.394232Z" + } + } + }, + "AzureResponse": { + "value": { + "currentStatus": { + "type": "AZURE_INSTANCE_TEMPLATE", + "image": { + "type": "AZURE_IMAGE_REFERENCE", + "publisher": "nginxinc", + "offer": "nginx-plus-v1", + "sku": "nginx-plus-ub1804", + "version": "latest" + }, + "instanceType": "Standard_A1", + "adminUser": "azureuser", + "publicKey": "ssh-rsa my-public-key-string", + "networkInterface": { + "type": "AZURE_NIC_ID", + "nicID": "/subscriptions/mySubscriptionID/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/myNicID" + } + }, + "desiredState": { + "type": "AZURE_INSTANCE_TEMPLATE", + "image": { + "type": "AZURE_IMAGE_REFERENCE", + "publisher": "nginxinc", + "offer": "nginx-plus-v1", + "sku": "nginx-plus-ub1804", + "version": "latest" + }, + "instanceType": "Standard_A1", + "adminUser": 
"azureuser", + "publicKey": "ssh-rsa my-public-key-string", + "networkInterface": { + "type": "AZURE_NIC_ID", + "nicID": "/subscriptions/mySubscriptionID/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/myNicID" + } + }, + "metadata": { + "createTime": "2020-04-17T02:18:44.394232Z", + "description": "Azure Instance Template for Standard A1", + "displayName": "Standard_A1-instance-template", + "kind": "instance-template", + "links": { + "rel": "/api/v1/infrastructure/locations/azure-westus2/instance-templates/my-azure-template-for-standard-A1" + }, + "name": "my-azure-template-for-standard-A1", + "uid": "4157d480-cd0e-40a0-8ba8-d0d5ce17c5d6", + "updateTime": "2020-04-17T02:18:44.394232Z" + } + } + }, + "AWSListResponse": { + "value": { + "items": [ + { + "currentStatus": { + "amiID": "ami-a0cfeed8", + "associatePublicIPAddress": true, + "instanceType": "t2.large", + "publicKey": "my-public-key", + "securityGroupIDs": [ + "sg-12345678", + "sg-98765432" + ], + "subnetID": "subnet-12345678", + "type": "AWS_INSTANCE_TEMPLATE" + }, + "desiredState": { + "amiID": "ami-a0cfeed8", + "associatePublicIPAddress": true, + "instanceType": "t2.large", + "publicKey": "my-public-key", + "securityGroupIDs": [ + "sg-12345678", + "sg-98765432" + ], + "subnetID": "subnet-12345678", + "type": "AWS_INSTANCE_TEMPLATE" + }, + "metadata": { + "createTime": "2020-04-17T02:18:44.394232Z", + "description": "AWS Instance Template for T2 large", + "displayName": "T2large-InstanceTemplate", + "kind": "instance-template", + "links": { + "rel": "/api/v1/infrastructure/locations/location-1/instance-templates/my-instance-template" + }, + "name": "my-instance-template", + "uid": "4157d480-cd0e-40a0-8ba8-d0d5ce17c5d6", + "updateTime": "2020-04-17T02:18:44.394232Z" + } + } + ] + } + } + }, + "responses": { + "BadRequest": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "Unauthorized": { + "description": "User authentication is invalid or missing.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "NotFound": { + "description": "The specified instance group resource was not found.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "Conflict": { + "description": "The request failed due to a conflict with an existing instance group resource.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "NotAllowed": { + "description": "The request is not allowed on the specified resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "code": 120322, + "message": "Error deleting the location: the name 'unspecified' is reserved. Specify a different name for the location, then try again." + } + } + } + } + } + } +} \ No newline at end of file diff --git a/content/controller/api/reference/ctlr-analytics-api.md b/content/controller/api/reference/ctlr-analytics-api.md new file mode 100644 index 000000000..40499bac0 --- /dev/null +++ b/content/controller/api/reference/ctlr-analytics-api.md @@ -0,0 +1,14 @@ +--- +description: Represents the state of the F5 NGINX Controller Analytics REST API. 
+docs: DOCS-1279 +doctypes: + - reference +type: redoc +tags: + - api +title: Analytics API +toc: false +weight: 200 +--- + +{{< openapi spec="/controller/api/reference/ctlr-analytics-openapi.json" >}} diff --git a/content/controller/api/reference/ctlr-analytics-openapi.json b/content/controller/api/reference/ctlr-analytics-openapi.json new file mode 100644 index 000000000..f94f7d8c0 --- /dev/null +++ b/content/controller/api/reference/ctlr-analytics-openapi.json @@ -0,0 +1,2461 @@ +{ + "openapi": "3.0.0", + "info":{ + "title": "NGINX Controller Analytics REST API", + "version": "v1", + "description": "Use the NGINX Controller Analytics API to get information about your system and application performance." + }, + "servers": [ + { + "description": "NGINX Controller API", + "url": "https://{{CONTROLLER_FQDN}}/api/v1" + }], + "tags": [ + { + "name": "Alerts", + "description": "Use the Alerts API to manage the Alert Rule resources and retrieve information about the Alerts." + }, + { + "name": "Catalogs", + "description": "Use the Catalogs API to find out about the Metrics and Dimensions that you can use to refine your Analytics data queries." + }, + { + "name": "Events", + "description": "Use the Events API to retrieve information about system Events." + }, + { + "name": "Forwarders", + "description": "Use the Forwarders API to forward data (metrics, events) to external services." + }, + { + "name": "Metrics", + "description": "Use the Metrics API to retrieve system Metrics." 
+ } + ], + "paths": { + "/analytics/alerts/rules": { + "get": { + "tags": [ + "Alerts" + ], + "summary": "List all Alert Rules", + "operationId": "listAlertRules", + "x-f5-experimental": true, + "description": "Returns a list of all the Alert Rule resources.", + "responses": { + "200": { + "description": "Successfully retrieved a list of all the Alert Rule resources.", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AlertRule" + } + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "500": { + "description": "The request cannot be processed because of an internal server error.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "post": { + "tags": [ + "Alerts" + ], + "summary": "Create an Alert Rule", + "description": "Creates a new Alert Rule resource.", + "operationId": "createAlertRule", + "x-f5-experimental": true, + "requestBody": { + "description": "Defines the Alert Rule resource to create.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AlertRule" + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the specified Alert Rule resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AlertRule" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a naming conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "500": { + "description": "The request cannot be processed because of an internal server error.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/analytics/alerts/rules/{alertRuleName}": { + "parameters": [ + { + "name": "alertRuleName", + "description": "Identifies the Alert Rule resource.", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ], + "get": { + "tags": [ + "Alerts" + ], + "summary": "Get an Alert Rule", + "description": "Returns information for the specified Alert Rule.", + "operationId": "getAlertRule", + "x-f5-experimental": true, + "responses": { + "200": { + "description": "Successfully returned the requested Alert Rule resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AlertRule" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "500": { + "description": "The request cannot be processed because of an internal server error.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Alerts" + ], + "summary": "Upsert an Alert Rule", + "description": "Creates a new Alert Rule or updates an existing Alert Rule resource.", + "operationId": "updateAlertRule", + "x-f5-experimental": true, + "requestBody": { + "description": "Defines the Alert Rule resource to create or the updates to make.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AlertRule" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Alert Rule resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AlertRule" + } + } + } + }, + "201": { + "description": "Successfully created the specified Alert Rule resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AlertRule" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "500": { + "description": "The request cannot be processed because of an internal server error.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Alerts" + ], + "summary": "Delete an Alert Rule", + "description": "Deletes the specified Alert Rule resource.", + "operationId": "deleteAlertRule", + "x-f5-experimental": true, + "responses": { + "204": { + "description": "Successfully deleted the specified Alert Rule resource." + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "500": { + "description": "The request cannot be processed because of an internal server error.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/analytics/alerts/rules/{alertRuleName}/alerts": { + "parameters": [ + { + "name": "alertRuleName", + "description": "Identifies the Alert Rule resource.", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ], + "get": { + "tags": [ + "Alerts" + ], + "summary": "Get ongoing Alerts", + "description": "Returns information about Alerts for the specified Alert Rule.", + "operationId": "getAlerts", + "x-f5-experimental": true, + "responses": { + "200": { + "description": "Successfully returned the requested Alerts.", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Alert" + } + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "500": { + "description": "The request cannot be processed because of an internal server error.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/analytics/alerts/rules/{alertRuleName}/alerts/{alertId}": { + "parameters": [ + { + "name": "alertRuleName", + "description": "Identifies the Alert Rule resource.", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "alertId", + "description": "Identifies the Alert resource.", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "get": { + "tags": [ + "Alerts" + ], + "summary": "Get Alert", + "description": "Returns information about the requested Alert for the specified Alert Rule.", + "operationId": "getAlert", + "responses": { + "200": { + "description": "Successfully returned the requested Alert.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Alert" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "500": { + "description": "The request cannot be processed because of an internal server error.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/analytics/catalogs/metrics": { + "get": { + "tags": [ + "Catalogs" + ], + "summary": "List the Metrics Catalog", + "operationId": "listMetricsDescriptions", + "description": "Lists all of the Metric resources in the Catalog.\n\nThe Metrics Catalog contains the definitions for all of the available NGINX and NGINX Controller Agent metrics.\nYou can use the Catalogs API to find information about the available Metrics, then use those Metrics to refine your Analytics data queries.\n", + "responses": { + "200": { + "description": "Successfully returned the Metrics Catalog list.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListMetricsDescriptionsResponse" + } + } + } + } + } + } + }, + "/analytics/catalogs/metrics/{metricName}": { + "get": { + "tags": [ + "Catalogs" + ], + "summary": "Get a Metric Description", + "operationId": "getMetricDescription", + "description": "Gets the description of the specified Metric.\n", + "parameters": [ + { + "in": "path", + "name": "metricName", + "description": "Name of the Metric resource.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Successfully returned the requested Metric resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MetricDescriptionResponse" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Metric resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/analytics/catalogs/dimensions": { + "get": { + "tags": [ + "Catalogs" + ], + "summary": "List the Dimensions Catalog", + "operationId": "listDimensionsDescriptions", + "description": "Lists all of the Dimension resources in the Catalog.\n\nDimensions provide a means of refining the data returned by metrics and events queries.\nYou can use the List Dimensions Catalog endpoint to find out what Dimensions are available.\nThen, use the Get a Dimension endpoint to find out more about a specific dimension.\n", + "responses": { + "200": { + "description": "Successfully returned the requested Dimension resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListDimensionsDescriptionsResponse" + } + } + } + } + } + } + }, + "/analytics/catalogs/dimensions/{dimensionName}": { + "get": { + "tags": [ + "Catalogs" + ], + "summary": "Get a Dimension Description", + "operationId": "getDimensionDescription", + "description": "Gets the description of the specified Dimension resource.\n", + "parameters": [ + { + "in": "path", + "name": "dimensionName", + "description": "Name of the Dimension", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Successfully returned the requested Dimension resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetDimensionDescriptionResponse" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Dimension resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/analytics/events": { + "get": { + "tags": [ + "Events" + ], + "summary": "Get Events", + "operationId": "queryEvents", + "description": "Returns a list of system Events.", + "parameters": [ + { + "name": "startTime", + "in": "query", + "description": "Indicates the start of the time window to include Events from (inclusive).\n\n- Must be provided if `endTime` was specified.\n- If you do not set 'startTime`, the most recent 100 Events will be returned.\n- `startTime` and `endTime` can be formatted as Event IDs (UUID format). If you use this format for any boundary, the validity of that boundary (for example, if endTime is greater than startTime) will not be checked.\n", + "schema": { + "$ref": "#/components/schemas/Time" + } + }, + { + "name": "endTime", + "in": "query", + "description": "Indicates the end of the time window to include Events from (non-inclusive).\n\n- Must be greater than `startTime`.\n- Must be provided if `startTime` was specified.\n", + "schema": { + "$ref": "#/components/schemas/Time" + } + }, + { + "name": "filter", + "in": "query", + "description": "Filters results based on dimension values.\n\nConsists of one or more predicates in the form `` where:\n- `` is the name of the dimension,\n- `` is one of the supported operators (`=`, `!=`, `<`, `<=`, `>=` `>`, `in` or `not`)\n- `` is the value of the dimension(s) that you want to filter on,\n- ` and ` are case sensitive.\n\nFor example: 'count > 100'\n\nPredicates can be combined into logical expressions using `OR`, `AND`, and `(` `)`.\nFor matching values, wildcard (`*`) use is supported.\n\nWrapping predicates in single quotes is 
recommended to ensure that the full query string is processed correctly.\n", + "schema": { + "type": "string", + "example": "category IN ('agent','controller') AND level='debug' AND count > 100" + }, + "allowReserved": true + }, + { + "name": "orderBy", + "in": "query", + "description": "Indicates how the results will be ordered.\n\nConsists of at most three comma-separated clauses in the form `` or `` where:\n- `` is the name of the dimension.\n- `` is the order by which the specified dimension will be sorted. Valid sort order is either \"ASC\" for ascending order, or \"DESC\" for descending order. If no sort order is provided, the default one (\"ASC\") will be used.\n\nNote that if no orderBy is provided, the default combination of descending \"timestamp\" and ascending \"id\" will be used regardless. If you provide different order for \"timestamp\" or \"id\" in orderBy, the default one will be overwritten.\n", + "schema": { + "type": "string", + "example": "name ASC,timestamp DESC" + } + }, + { + "name": "page", + "in": "query", + "description": "page number", + "required": false, + "schema": { + "type": "integer", + "default": 1, + "example": 1 + } + }, + { + "name": "pageToken", + "in": "query", + "description": "Transactional token used for pagination.\n\nThe token ensures consistency of the query results across requests for various pages of data. It provides a snapshot of the database contents from the time at which the query was received.\n\nIf `pageToken` is not provided with a page request, a token is automatically generated and will be returned in the response metadata. You should include the token in subsequent requests for pages in the query results.\n\nSending a query without a `pageToken` refreshes the query results.\n", + "schema": { + "type": "string", + "example": "1573653786" + } + }, + { + "name": "pageSize", + "in": "query", + "description": "Defines the number of returned items per page.\n\n- The maximum value is 100. 
If value is greater, it is automatically set down to 100.\n- If `pageSize`=0, pagination is disabled and all Events will be returned in response.\n- The response size is limited to 10,000 results. If the number of results exceeds 10,000 a warning is returned.\n", + "schema": { + "type": "integer", + "default": 100, + "example": 10 + } + }, + { + "name": "includeTotal", + "in": "query", + "description": "Defines if the Metadata should include the total count of events.\n", + "schema": { + "type": "boolean", + "default": false, + "example": true + } + } + ], + "responses": { + "200": { + "description": "Successfully returned the requested Events.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EventQueryResult" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/analytics/events/{id}": { + "get": { + "tags": [ + "Events" + ], + "summary": "Get an Event", + "operationId": "queryEventsById", + "description": "Returns the specified Event.", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Identifies the Event that you want to retrieve.", + "required": true, + "schema": { + "type": "string", + "example": "00112233-4455-6677-8899-aabbccddeeff" + } + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the requested Event.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Event" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/analytics/forwarders": { + "post": { + "tags": [ + "Forwarders" + ], + "summary": "Create a Forwarder", + "description": "Creates a new Forwarder resource.", + "operationId": "createForwarder", + "requestBody": { + "description": "Contains the desired settings for the Forwarder resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Forwarder" + } + } + } + }, + "responses": { + "202": { + "description": "The request to create a Forwarder resource succeeded. The resource will be created when the configuration is complete.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Forwarder" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "409": { + "$ref": "#/components/responses/Conflict" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + }, + "get": { + "tags": [ + "Forwarders" + ], + "summary": "Get a list of Forwarders.", + "description": "Returns a list of all Forwarder resources.", + "operationId": "listForwarders", + "responses": { + "200": { + "description": "Successfully retrieved a list of all Forwarder resources.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ForwarderListResponse" + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + } + }, + "/analytics/forwarders/{forwarderName}": { + "get": { + "tags": [ + "Forwarders" + ], + "summary": "Get a Forwarder", + "operationId": "getForwarder", + "description": "Gets information about an existing Forwarder 
resource.", + "parameters": [ + { + "$ref": "#/components/parameters/ForwarderName" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the requested Forwarder resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Forwarder" + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "404": { + "$ref": "#/components/responses/NotFound" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + }, + "put": { + "tags": [ + "Forwarders" + ], + "summary": "Create or update a Forwarder", + "description": "Creates new or updates an existing Forwarder resource.", + "operationId": "updateForwarder", + "requestBody": { + "description": "Contains the desired settings for the Forwarder resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Forwarder" + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/ForwarderName" + } + ], + "responses": { + "202": { + "description": "Successfully initialized the update process of a Forwarder resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Forwarder" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + }, + "delete": { + "tags": [ + "Forwarders" + ], + "summary": "Delete a Forwarder", + "operationId": "deleteForwarder", + "description": "Deletes a Forwarder resource.", + "parameters": [ + { + "$ref": "#/components/parameters/ForwarderName" + } + ], + "responses": { + "202": { + "description": "The request to delete a Forwarder resource succeeded. 
The resource is marked for deletion.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Forwarder" + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "404": { + "$ref": "#/components/responses/NotFound" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + } + }, + "/analytics/metrics": { + "get": { + "tags": [ + "Metrics" + ], + "summary": "Get Metrics", + "operationId": "queryMetrics", + "description": "Returns system Metrics data based on the query parameters provided.\n\nSupports filtering based on name and/or timestamp; aggregation over a configurable time span; and grouping by dimension.\n", + "parameters": [ + { + "name": "names", + "in": "query", + "description": "Identifies the Metrics data that you want to receive.\n\n- Provide multiple names as a comma-separated list.\n- You can provide an aggregate function for each Metric (`AVG`, `SUM`, `COUNT`, `MAX`, `MIN`, or `RATE`).\n- Combining non-aggregated and aggregated Metrics in a single query would apply any `groupBy` clause to only the latter.\n- Metrics with aggregates require the `startTime` parameter.\n", + "required": true, + "schema": { + "type": "string", + "example": "AVG(controller.agent.cpu.system),SUM(plus.upstream.bytes_rcvd)" + } + }, + { + "name": "startTime", + "in": "query", + "description": "Indicates the start of the time window to include Metrics from (inclusive).\n\n- If `startTime` is omitted, the last recorded value for the queried Metrics will be returned.\n- For aggregated Metrics, you must provide `startTime` in order to calculate the resolution.\n", + "schema": { + "$ref": "#/components/schemas/Time" + }, + "example": "now-5h" + }, + { + "name": "endTime", + "in": "query", + "description": "Indicates the end of the time window to include Metrics from (non-inclusive).\n\n- Must be greater than `startTime`.\n- If `endTime` is not specified 
when `startTime` is present, `endTime` defaults to the current time.\n", + "schema": { + "$ref": "#/components/schemas/Time" + } + }, + { + "name": "resolution", + "in": "query", + "description": "Changes the granularity of the returned data.\n\n- Must be a valid duration -- a string starting with a number followed by a unit of time (`y`, `M`, `w`, `d`, `h`, `m` or `s`).\n- When using a resolution, you must provide an aggregate function (or functions) in the `names` parameter and `startTime`.\n- If you do not provide a resolution, the maximum available resolution will be returned (`endTime` - `startTime`).\n", + "schema": { + "type": "string", + "example": "30s" + } + }, + { + "name": "filter", + "in": "query", + "description": "Filters results based on dimension values.\n\nConsists of one or more predicates in the form `` where:\n- `` is the name of the dimension.\n- `` is one of the supported operators (`=`, `!=`, `<`, `<=`, `>=` `>`, `in` or `not`).\n- `` is value of the dimension(s) that you want to filter on.\n- ` and ` is case sensitive.\n\nFor example: 'count > 100'\n\nPredicates can be combined into logical expressions using `OR`, `AND`, and `(` `)`.\nFor matching values, wildcard (`*`) use is supported.\n\nWrapping predicates in single quotes is recommended to ensure that the full query string is processed correctly.\n\nSee the Dimensions [Catalog](#/tags/catalogs) to find out more about Dimensions.\n", + "schema": { + "type": "string", + "example": "((app!='app1' OR app='app2') AND environment in ('aa', 'bb')) AND (tags!='*tag1*' AND interface='ens*')" + } + }, + { + "name": "groupBy", + "in": "query", + "description": "Groups the results according to the specified dimension(s).\n\n- Provide multiple dimension names as a comma-separated list.\n- All Metric names that you pass into the `names` parameter must be aggregated for the `groupBy` to work.\n", + "schema": { + "type": "string", + "example": "app,environment" + } + }, + { + "name": "seriesLimit", + 
"in": "query", + "description": "Sets an upper limit on the number of series returned.\nAlways returns additional series with dimension value named `all` (aggregating the values of all the metrics included in the results).\nIf the result does not include `all` stored dimensions values, the Metrics query returns series with dimension value named `other` (aggregating the values of all the metrics not included in the results).\nHas to be used along with the `orderSeriesBy` parameter.\n\n- Exactly one groupBy parameter must be provided in the query\n- Cannot be used along with `dimensions` parameter.\n", + "schema": { + "type": "integer", + "example": 25 + } + }, + { + "name": "orderSeriesBy", + "in": "query", + "description": "Sorts the results according to the order specified. \n\nUsed along with the `seriesLimit` parameter.\n", + "schema": { + "type": "string", + "example": "MAX DESC", + "default": "SUM DESC" + } + }, + { + "name": "dimensions", + "in": "query", + "description": "List of dimensions that should be returned in the response for each metric series.\n\nDimensions not specified in this parameter will be hidden in the results.\nThis might result in series having exact same dimension set, but being visible as separate.\n\nIf the `aggregation` and `groupBy` parameters are provided for a metric, any dimension provided in\nthe `dimensions` query parameter also has to be provided in the `groupBy` parameter.\n\nDimensions provided in the `groupBy` query parameter do not have to be provided in the `dimensions` query parameter.\n\n- To get a single series, provide the metric name with aggregation and the dimensions parameter with an empty value.\n- Cannot be used along with `seriesLimit`.\n", + "schema": { + "type": "string", + "example": "app,environment" + } + } + ], + "responses": { + "200": { + "description": "Successfully returned the requested Metrics data.", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/MetricQueryResult" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + } + }, + "components": { + "securitySchemes": { + "cookieAuth": { + "type": "apiKey", + "in": "cookie", + "name": "session" + } + }, + "responses": { + "BadRequest": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "Unauthorized": { + "description": "User authentication is invalid or missing.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "Forbidden": { + "description": "The request failed due to insufficient privileges.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "Internal": { + "description": "The request cannot be processed because of an internal server error.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "NotFound": { + "description": "The requested resource was not found or is unavailable.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "Conflict": { + "description": "The request failed due to a conflict with an existing resource.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + }, + "parameters": { + "ForwarderName": { + "in": "path", + "name": "forwarderName", + "schema": { + "type": "string" + }, + "required": true, + "description": "The name of the Forwarder resource.\n" + } + }, + "schemas": { + "Alert": { + "title": "alert", + 
"type": "object", + "properties": { + "uid": { + "type": "string", + "format": "uuid" + }, + "dimensions": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "startedTimestamp": { + "type": "string", + "format": "date-time", + "example": "2019-07-29T09:12:33.001Z", + "description": "Timestamp when the Alert started." + }, + "startedValue": { + "type": "number", + "description": "Value of the metric that caused the Alert to start." + }, + "lastCheckedTimestamp": { + "type": "string", + "format": "date-time", + "example": "2019-07-29T09:12:33.001Z", + "description": "Timestamp when the Alert was last checked." + }, + "lastCheckedValue": { + "type": "number", + "description": "Value of the metric when the Alert was last checked." + } + }, + "required": [ + "uid", + "dimensions", + "startedTimestamp", + "startedValue", + "lastCheckedTimestamp", + "lastCheckedValue" + ] + }, + "AlertRuleStatus": { + "title": "AlertRuleStatus", + "type": "object", + "properties": { + "alertsCount": { + "type": "integer", + "description": "Number of times an Alert for the Alert Rule has been started since creation." + }, + "status": { + "type": "string", + "enum": [ + "ok", + "ongoing" + ], + "description": "Alert Rule status:\n- `ok`: All the Alerts have expired or have never started.\n- `ongoing`: At least one Alert is currently ongoing.\n" + }, + "lastStartedTimestamp": { + "type": "string", + "format": "date-time", + "example": "2019-07-29T09:12:33.001Z", + "description": "Timestamp when the Alert Rule transferred from 'ok' to 'ongoing' status." + }, + "lastExpiredTimestamp": { + "type": "string", + "format": "date-time", + "example": "2019-07-29T09:12:33.001Z", + "description": "Timestamp when the Alert Rule transferred from 'ongoing' to 'ok' status." + }, + "lastCheckedTimestamp": { + "type": "string", + "format": "date-time", + "example": "2019-07-29T09:12:33.001Z", + "description": "Timestamp of the last successful check of the Alert Rule." 
+ } + } + }, + "AlertRule": { + "title": "AlertRule", + "type": "object", + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "userEmail": { + "type": "string", + "format": "email", + "readOnly": true, + "description": "Email of the user who created the Alert Rule." + }, + "metric": { + "type": "string", + "description": "A valid metric from the catalog.", + "example": "upstream.network.latency.max" + }, + "aggregation": { + "type": "string", + "description": "Aggregation of the metric over the configured period of time.", + "readOnly": true + }, + "filter": { + "type": "string", + "description": "The use of filter parameter limits the metrics data samples being taken into account for Alert Rule calculation\nto those matching the filter expression.\nFilter parameter syntax works the same as for `/analytics/metrics` API.\n\n- An empty filter parameter indicates that all collected data is going to be taken into account for Alert Rule calculation\n- `filter=app='myapp' AND (env='prod' or env='dev')` indicates that only samples related to desired app/env will be taken into account\n", + "example": "((app!='app1' OR app='app2') AND environment in ('dev', 'prod')) AND (tags!='*tag1*' AND interface='ens*')" + }, + "groupBy": { + "type": "string", + "description": "When `groupBy` is provided, alerts will be triggered for each distinct group of provided dimension(s).\nGroupBy parameter syntax works the same as for `/analytics/metrics` API.\n", + "example": "app,environment" + }, + "threshold": { + "type": "number", + "description": "Threshold to which the metric value will be compared to." 
+ }, + "operator": { + "type": "string", + "enum": [ + "ge", + "le" + ], + "description": "Operator that defines which metric values will trigger an alert:\n\n- `ge`: metric value must be greater than or equal to the threshold in order to trigger an alert\n- `le`: metric value must be less than or equal to the threshold in order to trigger an alert\n" + }, + "period": { + "type": "string", + "example": "1h", + "description": "Period from which the metric aggregated value is calculated.\n\nMust be a valid duration - a string starting with a number followed by a unit of time (`h`, `m` or `s`).\n\n24h is the maximum allowed period.\n" + }, + "notificationType": { + "description": "Defines how notifications about triggered Alerts will be delivered to the user.", + "type": "string", + "enum": [ + "email" + ] + }, + "emailAddresses": { + "type": "array", + "minItems": 1, + "uniqueItems": true, + "items": { + "type": "string", + "format": "email", + "example": "user@example.com" + } + }, + "mute": { + "type": "boolean", + "description": "Indicates if the Alert Rule is muted. 
(Muted Alert Rules will not trigger notifications).", + "default": false + }, + "status": { + "$ref": "#/components/schemas/AlertRuleStatus", + "readOnly": true + } + }, + "required": [ + "metadata", + "metric", + "threshold", + "operator", + "period" + ] + }, + "SelfLinks": { + "type": "object", + "description": "The SelfLinks object contains a link from the resource to itself.\nThis object is used only in responses.\n", + "properties": { + "rel": { + "type": "string", + "example": "/api/v1/services/environments/prod", + "description": "`rel` contains the complete path fragment of a URI and can be used\nto construct a query to the object.\n" + } + } + }, + "ResourceMeta": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "pattern": "^[^A-Z\\s\\x00-\\x1f\\x60\\x7f\\;\\*\\\"\\[\\]\\{\\}\\\\\\/%\\?:=&\\~\\^|#<>]+$", + "not": { + "type": "string", + "enum": [ + ".", + ".." + ] + }, + "minLength": 1, + "maxLength": 1024, + "example": "resource-name", + "description": "Resource name is a unique identifier for a resource within the context of a namespace.\nResource names must conform to [RFC 1738 Section 2.2](https://www.ietf.org/rfc/rfc1738.txt) and have a valid syntax for email addresses. The following rules are enforced:\n\n- do not utilize URL encoding;\n- do not include spaces;\n- do not use uppercase characters, for example, 'A-Z'; extended character sets are supported;\n- do not use the following characters: `\"`, `*`, `:`, `;`, `/`, `\\`, `%`, `?`, `hash`, `=`, `&`, `|`, `~`, `^`, `{`, `}`, `[`, `]`, `<`, `>`;\n- cannot start or end with an `@` sign;\n- cannot be only `.` or `..`\n\nFor example: For a collection resource located at\n\n`https://controller.example.com/api/v1/services/apps/shopping_@1`\n\nthe resource name is \"shopping_@1\".\n" + }, + "displayName": { + "type": "string", + "example": "My Display Name", + "description": "`displayName` is a user friendly resource name. 
It can be used to define \na longer, and less constrained, name for a resource.\n\nDisplay names:\n\n- are optional (defaults to an empty string if no value is provided),\n- do not have to be unique, \n- cannot be assigned by the server.\n" + }, + "description": { + "type": "string", + "example": "This is a sample description string. It provides information about the resource.", + "description": "`description` is a free-form text property. You can use it to provide information that helps \nto identify the resource.\n\nDescriptions:\n\n- are optional (defaults to an empty string if no value is provided),\n- do not have to be unique, \n- cannot be assigned by the server.\n" + }, + "kind": { + "type": "string", + "example": "-", + "description": "Kind is a string representation of an API resource's data type.\nIt is assigned by the server and cannot be changed. \n\nWhen creating a `kind`, the server uses hyphens to connect word segments; \nsingleton and collection item resources are not pluralized.\n" + }, + "uid": { + "type": "string", + "format": "uuid", + "example": "d290f1ee-6c54-4b01-90e6-d701748f0851", + "description": "Unique Identifier (UID)\n\nUID is a unique identifier in time and space for a resource. \nWhen you create a resource, the server assigns a UID to the resource.\n\nRefer to [IETF RFC 4122](https://tools.ietf.org/html/rfc4122) for more information.\n" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "production_public", + "dev", + "new_app", + "us-west-1", + "emea" + ], + "description": "You can assign `tags` to a resource as a way to help map, scope, \nand organize resources. 
\n\nThe system uses tag selectors to specify selection criteria that \nmatch resources that have particular tags.\n" + }, + "ref": { + "type": "string", + "example": "/services/environments/prod", + "description": "The `ref` field contains a reference to another NGINX Controller resource.\n" + }, + "links": { + "$ref": "#/components/schemas/SelfLinks" + }, + "createTime": { + "type": "string", + "format": "date-time", + "example": "2019-07-29T09:12:33.001Z", + "description": "A timestamp that represents the server time when the resource was created.\n\nCreate time is not guaranteed to be set in \"happens-before\" order\nacross separate operations.\n\nIn JSON format, `create_time` type is encoded as a string in the\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt).\n\nFor example: 2018-04-01T01:30:15.01Z\n\nCreate Time is assigned by the server and cannot be changed.\n" + }, + "updateTime": { + "type": "string", + "format": "date-time", + "example": "2019-07-29T10:12:33.001Z", + "description": "A timestamp that represents the server time when the resource was last modified.\n\nResources that have never been updated do not have an `update_time` stamp.\n\nThe default value for resources that have never been updated is the local \nlanguage-specific equivalent of \"null\".\n\nIn JSON format, `update_time` type is encoded as a string as described in \n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt).\n" + } + } + }, + "ErrorDetail": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string", + "example": "Error doing : . This can lead to . Try to resolve the issue.", + "description": "A detailed error message returned by the server. 
\n\nThese messages contain the following information, where applicable:\n\n- What happened.\n- Why it happened.\n- What the consequences are (if any).\n- Recommended action to take to resolve the issue.\n" + } + } + }, + "ErrorModel": { + "type": "object", + "required": [ + "message", + "code" + ], + "properties": { + "message": { + "type": "string", + "example": "Error doing .", + "description": "A human-readable message, in English, that describes the error.\n" + }, + "code": { + "type": "integer", + "example": 1234567, + "description": "A numeric error code that can be used to identify errors for support purposes.\n" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ErrorDetail" + } + } + } + }, + "Time": { + "description": "Indicates a point in time.\n\nYou can provide time using ISO 8601 format or as an offset. An offset is a string that starts with `+` or `-`, followed by a number and a unit of time (`y`, `M`, `w`, `d`, `h`, `m` or `s`).\nExamples: \"2019-08-07T09:57:36.088757764Z\", \"now-3h\"\n", + "type": "string", + "example": "2019-08-07T09:57:36.088757764Z" + }, + "MetricQueryResult": { + "type": "object", + "properties": { + "queryMetadata": { + "$ref": "#/components/schemas/MetricQueryMetadata" + }, + "metrics": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Metric" + } + }, + "responseMetadata": { + "$ref": "#/components/schemas/MetricResponseMetadata" + } + } + }, + "MetricResponseMetadata": { + "type": "object", + "properties": { + "warning": { + "type": "string" + } + } + }, + "MetricQueryMetadata": { + "type": "object", + "properties": { + "startTime": { + "type": "string", + "format": "date-time", + "example": "2019-08-07T09:57:36.088757764Z" + }, + "endTime": { + "type": "string", + "format": "date-time", + "example": "2019-08-07T09:57:36.088757764Z" + }, + "resolution": { + "type": "string", + "example": "30m" + } + } + }, + "Metric": { + "type": "object", + "description": "Metrics provide a 
means of measuring and analyzing the performance of your systems and of NGINX Controller.", + "properties": { + "name": { + "type": "string", + "example": "plus.upstream.response.count" + }, + "aggr": { + "type": "string", + "enum": [ + "AVG", + "COUNT", + "SUM", + "MAX", + "MIN", + "RATE" + ], + "example": "AVG" + }, + "series": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Series" + } + } + } + }, + "Series": { + "type": "object", + "properties": { + "dimensions": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "example": { + "app": "app1", + "env": "prod" + } + }, + "timestamps": { + "type": "array", + "items": { + "type": "string", + "format": "date-time" + }, + "example": [ + "2019-08-07T09:57:30", + "2019-08-07T09:57:35" + ] + }, + "values": { + "type": "array", + "items": { + "type": "number" + }, + "example": [ + 4.2, + 4.4 + ] + } + } + }, + "ListMetricsDescriptionsResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/MetricDescriptionRecord" + } + } + } + }, + "MetricDescriptionResponse": { + "$ref": "#/components/schemas/MetricDescriptionRecord" + }, + "MetricDescriptionRecord": { + "type": "object", + "required": [ + "metadata", + "currentStatus" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "currentStatus": { + "$ref": "#/components/schemas/MetricDescription" + } + } + }, + "MetricDescription": { + "type": "object", + "description": "The information record for a Metrics Catalog resource.", + "properties": { + "name": { + "type": "string", + "example": "nginx.http.request.bytes_sent" + }, + "description": { + "type": "string", + "example": "Number of bytes sent to clients." 
+ }, + "unit": { + "type": "string", + "example": "bytes" + }, + "aggregations": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "AVG", + "COUNT", + "SUM", + "MAX", + "MIN" + ] + }, + "example": [ + "AVG", + "SUM" + ] + }, + "type": { + "type": "string", + "enum": [ + "counter", + "gauge", + "incremental" + ], + "example": "incremental" + }, + "categories": { + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "nginx", + "apimgmt_environment", + "apimgmt_definition", + "apimgmt_entry_point" + ], + "description": "An array of freeform strings containing the category(ies) of a Metrics Catalog resource." + }, + "dimensions": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + }, + "description": "An array of the Dimensions that apply to a Metrics Catalog resource." + } + } + }, + "ListDimensionsDescriptionsResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DimensionDescriptionRecord" + } + } + } + }, + "GetDimensionDescriptionResponse": { + "$ref": "#/components/schemas/DimensionDescriptionRecord" + }, + "DimensionDescriptionRecord": { + "type": "object", + "required": [ + "metadata", + "currentStatus" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "currentStatus": { + "$ref": "#/components/schemas/DimensionDescription" + } + } + }, + "DimensionDescription": { + "type": "object", + "description": "The information record for a Dimensions Catalog resource.", + "properties": { + "name": { + "type": "string", + "example": "instance" + }, + "description": { + "type": "string", + "example": "Instance name." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "int", + "list" + ], + "example": "string" + }, + "metrics": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + }, + "description": "An array of the Metrics that the Dimensions resource applies to." + } + } + }, + "NamedLinks": { + "allOf": [ + { + "$ref": "#/components/schemas/SelfLinks" + }, + { + "type": "object", + "description": "Contains information about the object being referred to.\n\nThese are generally details -- like the object name and display name --\nthat are useful to a consumer of the API that performs further\nprocessing. \n\nThis object is only present in responses.\n \n", + "properties": { + "name": { + "type": "string", + "example": "production", + "description": "The name of the linked resource.\n" + }, + "displayName": { + "type": "string", + "example": "Production Environment", + "description": "A user friendly resource name." + } + } + } + ] + }, + "ResourceRef": { + "type": "object", + "required": [ + "ref" + ], + "properties": { + "ref": { + "type": "string", + "example": "/services/environments/prod", + "description": "A reference to another NGINX Controller resource.\n" + }, + "links": { + "$ref": "#/components/schemas/NamedLinks" + } + } + }, + "EventQueryResult": { + "type": "object", + "properties": { + "Metadata": { + "$ref": "#/components/schemas/Metadata" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Event" + } + } + } + }, + "Event": { + "type": "object", + "description": "An Event is a system message.", + "properties": { + "timestamp": { + "type": "string", + "format": "date-time", + "example": "2019-08-07T09:57:36.088757764Z" + }, + "id": { + "type": "string", + "format": "uuid", + "example": "00112233-4455-6677-8899-aabbccddeeff" + }, + "message": { + "type": "string", + "example": "nginx stub_status detected, https://127.0.0.1:443/basic_status" + }, + "count": { + "type": "integer", + "example": 99 
+ }, + "category": { + "type": "string" + }, + "level": { + "type": "string", + "enum": [ + "DEBUG", + "INFO", + "WARNING", + "ERROR", + "CRITICAL" + ], + "example": "INFO" + }, + "hostname": { + "type": "string", + "example": "d0784771a503" + }, + "alias": { + "type": "string", + "example": "my_system" + }, + "status": { + "type": "string", + "example": "success" + }, + "error": { + "type": "string" + }, + "local_id": { + "type": "string", + "example": "d23c85484ee760ee5f4619c0434e1968b5290964487541da0889964eb783613c" + }, + "root_uuid": { + "type": "string", + "example": "759e842f6b9f51a7803f57da1e9f5ae8" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "example": "[\"tag1\", \"tag2\"]" + }, + "instance": { + "type": "string", + "example": "2" + }, + "location": { + "type": "string", + "example": "unspecified" + }, + "correlationId": { + "type": "string", + "example": "e5f355e4de82edd0afcd74ef6c1ee6aa" + }, + "http.request_endpoint": { + "type": "string", + "example": "/services/environments/dev/apps/app/components/25" + }, + "http.request_method": { + "type": "string", + "example": "POST" + }, + "user": { + "type": "string", + "example": "admin@nginx.test" + }, + "environment": { + "type": "string", + "example": "dev-environment" + }, + "app": { + "type": "string", + "example": "finance-app" + }, + "component": { + "type": "string", + "example": "secure-component" + }, + "gateway": { + "type": "string", + "example": "dev-gateway" + }, + "request_outcome": { + "type": "string", + "enum": [ + "REJECTED", + "PASSED" + ], + "example": "REJECTED" + }, + "request_outcome_reason": { + "type": "string", + "enum": [ + "SECURITY_WAF_OK", + "SECURITY_WAF_VIOLATION", + "SECURITY_WAF_FLAGGED", + "SECURITY_WAF_BYPASS", + "SECURITY_NGINX_VIOLATION", + "SECURITY_WAF_VIOLATION_TRANSPARENT" + ], + "example": "SECURITY_WAF_FLAGGED" + }, + "http.response_code": { + "type": "integer", + "example": 200 + }, + "http.hostname": { + "type": "string", + 
"example": "itay-108-117.f5net.com" + }, + "http.remote_addr": { + "type": "string", + "example": "127.0.0.1" + }, + "http.remote_port": { + "type": "integer", + "example": 4000 + }, + "http.server_addr": { + "type": "string", + "example": "0.0.0.0" + }, + "http.server_port": { + "type": "integer", + "example": 80 + }, + "http.request": { + "type": "string", + "example": "GET /test.exe HTTP/1.1\\r\\nUser-Agent: curl/7.29.0\\r\\nHost: localhost\\r\\nAccept: */*\\r\\n" + }, + "waf.support_id": { + "type": "string", + "example": "161339000577578694" + }, + "waf.signature_ids": { + "type": "string", + "example": "200001475,200000098" + }, + "waf.policy": { + "type": "string", + "example": "/Common/policy1" + }, + "waf.attack_types": { + "type": "string", + "example": "Non-browser Client,Abuse of Functionality,Cross Site Scripting (XSS)" + }, + "waf.violation_rating": { + "type": "string", + "enum": [ + "RISK_COULD_NOT_BE_DETERMINED", + "POSSIBLE_ATTACK", + "MOST_LIKELY_ATTACK" + ], + "example": "MOST_LIKELY_ATTACK" + }, + "waf.signature_names": { + "type": "string", + "example": "XSS script tag end (Parameter) (2),XSS script tag (Parameter)" + }, + "waf.is_truncated": { + "type": "string", + "example": "true" + }, + "waf.signature_cves": { + "type": "string", + "example": "CVE-2017-8759,CVE-2015-1641" + }, + "waf.sub_violations": { + "type": "string", + "example": "HTTP protocol compliance failed:Host header contains IP address" + }, + "waf.violations": { + "type": "string", + "example": "HTTP protocol compliance failed,Illegal meta character in value,Attack signature detected" + }, + "waf.x_forwarded_for_header_value": { + "type": "string", + "example": "87.233.120.158" + }, + "waf.bot_anomalies": { + "type": "string", + "example": "Search Engine Verification Failed" + }, + "waf.bot_category": { + "type": "string", + "example": "Search Engine" + }, + "waf.bot_client_class": { + "type": "string", + "example": "Malicious Bot" + }, + "waf.bot_signature_name": { + "type": 
"string", + "example": "Google" + }, + "waf.enforced_bot_anomalies": { + "type": "string", + "example": "Search Engine Verification Failed" + }, + "action_type": { + "type": "string", + "example": "login" + }, + "action_outcome": { + "type": "string", + "example": "success" + }, + "auth_provider.name": { + "type": "string", + "example": "local" + }, + "auth_provider.type": { + "type": "string", + "example": "BASIC" + }, + "resource_name": { + "type": "string", + "example": "datadog-forwarder" + }, + "big_ip_address": { + "type": "string", + "example": "https://87.233.120.158" + }, + "workload_health_state": { + "type": "string", + "example": "unhealthy" + } + } + }, + "Metadata": { + "type": "object", + "properties": { + "pagination": { + "$ref": "#/components/schemas/MetadataPagination" + } + } + }, + "MetadataPagination": { + "type": "object", + "required": [ + "pageToken" + ], + "properties": { + "pageToken": { + "type": "string", + "example": "1594048781" + }, + "links": { + "$ref": "#/components/schemas/PaginationLinks" + }, + "total": { + "type": "integer", + "example": 3600 + } + } + }, + "PaginationLinks": { + "type": "object", + "properties": { + "next": { + "$ref": "#/components/schemas/SelfLinks" + }, + "prev": { + "$ref": "#/components/schemas/SelfLinks" + } + } + }, + "Forwarder": { + "type": "object", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/DesiredState" + }, + "currentStatus": { + "$ref": "#/components/schemas/CurrentStatus" + } + }, + "example": { + "metadata": { + "name": "splunk", + "displayName": "Splunk - Metrics", + "description": "Metrics forwarder going to Splunk HEC", + "kind": "forwarder", + "uid": "d290f1ee-6c54-4b01-90e6-d701748f0851", + "links": { + "rel": "/api/v1/analytics/forwarders/splunk" + }, + "createTime": "2019-07-29T09:12:33.001Z", + "updateTime": "2019-07-29T10:12:33.001Z" + }, + 
"desiredState": { + "collectorType": "SPLUNK", + "integrationRef": { + "ref": "/platform/integrations/new_splunk_hec" + }, + "streams": [ + { + "inputDataType": "METRICS", + "outputFormat": "SPLUNK", + "selector": "names=http.request.bytes_rcvd&filter=app='myapp'" + }, + { + "inputDataType": "METRICS", + "outputFormat": "SPLUNK", + "selector": "names=http.request.count&filter=app='myapp'" + } + ] + }, + "currentStatus": { + "state": { + "selfConfigState": { + "isConfigured": false, + "isConfiguring": true, + "isError": false, + "isDeleting": false, + "total": 1, + "configured": 0, + "configuring": 1, + "error": 0, + "deleting": 0 + } + }, + "collectorType": "SPLUNK", + "integrationRef": { + "ref": "/platform/integrations/splunk_hec" + }, + "streams": [ + { + "inputDataType": "METRICS", + "outputFormat": "SPLUNK", + "selector": "names=http.request.bytes_rcvd&filter=app='myapp'" + }, + { + "inputDataType": "METRICS", + "outputFormat": "SPLUNK", + "selector": "names=http.request.count&filter=app='myapp'" + } + ] + } + } + }, + "CurrentStatus": { + "type": "object", + "required": [ + "integrationRef", + "streams", + "state", + "collectorType" + ], + "properties": { + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "integrationRef": { + "$ref": "#/components/schemas/ResourceRef" + }, + "collectorType": { + "type": "string", + "enum": [ + "DATADOG", + "SPLUNK", + "SYSLOG", + "OTLP_HTTP", + "OTLP_GRPC" + ], + "description": "The type of Collector receiving forwarded data. 
The parameter sets the communication protocol\nand how the Forwarder monitors the liveness of the Collector.\n" + }, + "streams": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ForwarderStream" + } + } + } + }, + "DesiredState": { + "type": "object", + "required": [ + "integrationRef", + "streams", + "collectorType" + ], + "properties": { + "integrationRef": { + "$ref": "#/components/schemas/ResourceRef" + }, + "collectorType": { + "type": "string", + "enum": [ + "DATADOG", + "SPLUNK", + "SYSLOG" + ], + "description": "The type of Collector receiving forwarded data. The parameter sets the communication protocol\nand how the Forwarder monitors the liveness of the Collector.\n" + }, + "streams": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ForwarderStream" + } + } + } + }, + "ForwarderStream": { + "type": "object", + "required": [ + "outputFormat", + "inputDataType", + "selector" + ], + "description": "There is no global limitation on number of streams.\n\nWhen creating a Stream, be sure that the metrics you select are not already included in an existing Stream.\nWhen you add the same metrics to multiple Streams -- which is known as \"overlapping Streams\" -- you may see significant performance degradation and data duplication. 
The duplicated data may not be interpreted correctly by the Collector.\n", + "properties": { + "outputFormat": { + "type": "string", + "enum": [ + "SPLUNK", + "DATADOG", + "SYSLOG", + "OTLP" + ], + "description": "The format in which you want to send data to the Collector.\n", + "example": "SPLUNK" + }, + "inputDataType": { + "type": "string", + "enum": [ + "METRICS", + "EVENTS" + ], + "description": "The type of data that the stream is processing.", + "example": "METRICS" + }, + "selector": { + "type": "string", + "description": "The selector object limits the amount of data sent from NGINX Controller to the desired Collector.\nThe format of this parameter depends on the `inputDataType` value (either `METRICS` or `EVENTS`).\nAn empty selector indicates that all collected data is going to be forwarded to the Collector.\nEnsure sending all your data out of NGINX Controller does not cause additional charges.\n\n\nSelector elements and rules for `inputDataType`=`METRICS`:\nAllowed parameters: `excluded_names`, `names`, `filter`. 
The `filter` parameter and its behavior is not specific to `METRICS` `inputDataType`.\n\n`excluded_names` parameter consists of zero or more metric names that should not be forwarded\n`names` parameter consists of zero or more metric names that should be forwarded\nThe specific rules for these parameters are as follows:\n- if no value is provided or the only value is a wildcard (`*`), then all metric names will be forwarded.\n- if an explicit list of metric names is provided, all of these metrics will be forwarded.\n- any metric name can contain wildcard (`*`) character(s), in which case it will be resolved to all valid metrics that match the pattern.\n- `excluded_names` can be composed of metrics names and a wildcard (`*`) which should not be forwarded\n- any metric resolved from a `names` wildcard can be removed by an explicit or wildcard metric from `excluded_names`\n- any metric provided explicitly (full name without a wildcard) in the `names` parameter cannot be excluded by\n a wildcard or an explicit metric from the `excluded_names` parameter. 
That is, `names=system.cpu.idle&excluded_names=system.*`\n results in an error\n- `excluded_names` parameter is always applied after the resolution of the metrics from `names` parameter\n\nMake sure that all metrics provided in the `names` and `excluded_names` parameters resolve to an existing catalog metric or metrics.\n\nSample use:\n- `names=http.request.count, http.request.bytes_sent` indicates that only samples of those two metrics will be forwarded\n- `filter=app='myapp' & (env='prod' or env='dev')` indicates that only samples related to desired app/env will be forwarded,\nalso objects that do not contain the filtered dimensions won't be sent\n- `names=http.request.count, http.request.bytes_sent&filter=app='myapp' AND (env='prod' or env='dev')` combines both above\n- `excluded_names=nginx.http.request.*` all metrics except matching `http.request.*` will be forwarded\n- `excluded_names=nginx.http.request.time.*&names=nginx.http.request.*` `excluded_names` can be combined\n with the `names` parameter to exclude metrics that were selected with a wildcard in the `names` parameters\n\n\nFiltering rules for `inputDataType`=`EVENTS`:\nAllowed parameters: `filter`. 
The `filter` parameter and its behavior is not specific to `EVENTS` `inputDataType`.\n\n\nFilter parameter:\nFilter parameter is shared between all supported data types and has the same behavior when used with any of them.\nIt consists of one or more predicates in the form `<dimension><operator><value>` where:\n- `<dimension>` is the name of the dimension,\n- `<operator>` is one of the supported operators (`=`, `!=`, `<`, `<=`, `>=`, `>`, `in` or `not`),\n- `<value>` is the value of the dimension(s) that you want to filter on,\n- `<dimension>` and `<value>` are case sensitive.\n\nFor example: 'count > 100'.\nPredicates can be combined into logical expressions using `OR`, `AND`, and `(` `)`.\nWrapping predicates in single quotes is recommended to ensure that the full query string is processed correctly.\n\nFor matching values, wildcard (`*`) use is supported.\n\nMake sure that all dimensions provided in the `filter` parameters resolve to an existing catalog dimension.\n\nAny expression with a dimension not present in the filtered metric/event will evaluate to `false`. This enables the user\nto create a filter which will forward a metric/event with a dimension not valid for that particular metric/event, but as a whole the filter expression\nwill evaluate to true and only parts will evaluate to false. 
Example:\n- `filter=file_path='/etc/hosts' OR instance='instance_name'` will forward `http.request.body_bytes_sent` as `file_path` dimension is invalid but whole expression will evaluate to true\n- `filter=file_path='/etc/hosts' AND instance='instance_name'` will not forward `http.request.body_bytes_sent` as `file_path` dimension is invalid and whole expression will always evaluate to false, but will forward `system.disk.used` which has both dimensions\n", + "example": "names=http.request.count,http.request.*,*latency*&filter=((app!='app1' OR app='app2') AND environment in ('aa', 'bb')) AND (tags!='*tag1*' AND interface='ens*')" + } + } + }, + "ForwarderListResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Forwarder" + }, + "description": "List of Forwarder resources." + } + }, + "example": { + "items": [ + { + "metadata": { + "name": "splunk", + "displayName": "Splunk - Metrics", + "description": "Metrics forwarder going to Splunk HEC", + "kind": "forwarder", + "uid": "d290f1ee-6c54-4b01-90e6-d701748f0851", + "links": { + "rel": "/api/v1/analytics/forwarders/splunk" + }, + "createTime": "2019-07-29T09:12:33.001Z", + "updateTime": "2019-07-29T10:12:33.001Z" + }, + "desiredState": { + "collectorType": "SPLUNK", + "integrationRef": { + "ref": "/platform/integrations/splunk_hec" + }, + "streams": [ + { + "inputDataType": "METRICS", + "outputFormat": "SPLUNK", + "selector": "names=http.request.bytes_rcvd" + }, + { + "inputDataType": "METRICS", + "outputFormat": "SPLUNK", + "selector": "names=http.request.count" + } + ] + }, + "currentStatus": { + "state": { + "selfConfigState": { + "isConfigured": true, + "isConfiguring": false, + "isError": false, + "isDeleting": false, + "total": 1, + "configured": 1, + "configuring": 0, + "error": 0, + "deleting": 0 + }, + "conditions": [ + { + "type": "error", + "message": "integration reference not found" + } + ] + }, + "collectorType": "SPLUNK", + 
"integrationRef": { + "ref": "/platform/integrations/splunk_hec" + }, + "streams": [ + { + "inputDataType": "METRICS", + "outputFormat": "SPLUNK", + "selector": "names=http.request.bytes_rcvd&filter=app='myapp'" + }, + { + "inputDataType": "METRICS", + "outputFormat": "SPLUNK", + "selector": "names=http.request.count&filter=app='myapp'" + } + ] + } + }, + { + "metadata": { + "name": "splunk", + "displayName": "Splunk - Metrics", + "description": "Metrics forwarder going to Splunk HEC", + "kind": "forwarder", + "uid": "d290f1ee-6c54-4b01-90e6-d701748f0851", + "links": { + "rel": "/api/v1/analytics/forwarders/splunk" + }, + "createTime": "2019-07-29T09:12:33.001Z", + "updateTime": "2019-07-29T10:12:33.001Z" + }, + "desiredState": { + "collectorType": "SPLUNK", + "integrationRef": { + "ref": "/platform/integrations/splunk_hec" + }, + "streams": [ + { + "inputDataType": "METRICS", + "outputFormat": "SPLUNK", + "selector": "names=http.request.bytes_rcvd&filter=app='myapp'" + }, + { + "inputDataType": "METRICS", + "outputFormat": "SPLUNK", + "selector": "names=http.request.count&filter=app='myapp'" + } + ] + }, + "currentStatus": { + "state": { + "selfConfigState": { + "isConfigured": true, + "isConfiguring": false, + "isError": false, + "isDeleting": false, + "total": 0, + "configured": 1, + "configuring": 0, + "error": 0, + "deleting": 0 + }, + "conditions": [ + { + "type": "error", + "message": "integration reference not found" + } + ] + }, + "collectorType": "SPLUNK", + "integrationRef": { + "ref": "/platform/integrations/splunk_hec" + }, + "streams": [ + { + "inputDataType": "METRICS", + "outputFormat": "SPLUNK", + "selector": "names=http.request.bytes_rcvd&filter=app='myapp'" + }, + { + "inputDataType": "METRICS", + "outputFormat": "SPLUNK", + "selector": "names=http.request.count&filter=app='myapp'" + } + ] + } + } + ] + } + }, + "ConfigStateTally": { + "type": "object", + "properties": { + "isConfigured": { + "type": "boolean", + "description": "The configuration 
operation is complete." + }, + "isConfiguring": { + "type": "boolean", + "description": "The configuration of the resource, or of its child(ren), is in process." + }, + "isError": { + "type": "boolean", + "description": "An error occurred while configuring the resource or its child(ren)." + }, + "isDeleting": { + "type": "boolean", + "description": "A delete operation is in progress for the resource or its child(ren)." + }, + "total": { + "type": "integer", + "description": "The total number of resources to which the configuration operation applies." + }, + "configured": { + "type": "integer", + "description": "The number of resources that have a complete and valid configuration." + }, + "configuring": { + "type": "integer", + "description": "The number of resources that are in the process of being configured." + }, + "error": { + "type": "integer", + "description": "The number of resources that have encountered an error during the configuration process." + }, + "deleting": { + "type": "integer", + "description": "The number of resources that are in the process of being deleted." + } + } + }, + "ConfigCondition": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "The condition type." + }, + "message": { + "type": "string", + "description": "A human-readable message that provides additional information about the configuration operation." 
+ } + } + }, + "ConfigState": { + "type": "object", + "description": "A representation of the resource's current configuration state \nthat comprises the status of the resource itself (`selfConfigState`) and any child \nresources (`childrenConfigState`).\n\nThe conditions array provides additional information during configuration changes.\n", + "properties": { + "selfConfigState": { + "$ref": "#/components/schemas/ConfigStateTally" + }, + "childrenConfigState": { + "$ref": "#/components/schemas/ConfigStateTally" + }, + "conditions": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ConfigCondition" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/content/controller/api/reference/ctlr-apim-api.md b/content/controller/api/reference/ctlr-apim-api.md new file mode 100644 index 000000000..0003f6235 --- /dev/null +++ b/content/controller/api/reference/ctlr-apim-api.md @@ -0,0 +1,14 @@ +--- +description: Represents the state of the F5 NGINX Controller API Management REST API. +docs: DOCS-1281 +doctypes: + - reference +type: redoc +tags: + - api +title: APIM API +toc: false +weight: 400 +--- + +{{< openapi spec="/controller/api/reference/ctlr-apim-openapi.json" >}} diff --git a/content/controller/api/reference/ctlr-apim-openapi.json b/content/controller/api/reference/ctlr-apim-openapi.json new file mode 100644 index 000000000..e8879596d --- /dev/null +++ b/content/controller/api/reference/ctlr-apim-openapi.json @@ -0,0 +1,12547 @@ +{ + "openapi": "3.0.0", + "info":{ + "title": "NGINX Controller API Management REST API", + "version": "v1", + "description": "Manage the NGINX Controller APIM module." + }, + "servers": [ + { + "description": "NGINX Controller API", + "url": "https://{{CONTROLLER_FQDN}}/api/v1" + }], + "tags": [ + { + "name": "Instances", + "description": "Use the Instances API to manage NGINX Controller Instance resources." 
+ }, + { + "name": "API Definitions", + "description": "Use the \"API Definitions\" API to manage your APIs by using the NGINX Controller API Management module." + }, + { + "name": "Environments", + "description": "Use the Environments API to manage your Application Environments." + }, + { + "name": "Services", + "description": "Use the Services API to request a metadata list of a desired resource within a single environment or across all environments.\nSupported resources:\n - published-apis\n" + }, + { + "name": "DevPortals", + "description": "Use the DevPortals API to manage DevPortals resources." + }, + { + "name": "Instance Groups", + "description": "Use the Instance Groups API to manage a set of instances that can be used for scaling and high availability." + }, + { + "name": "Identity Providers", + "description": "Use the Identity Provider API to manage Identity providers in the API-M Credential Management partition." + }, + { + "name": "Certs", + "description": "Use the Certs API to manage the certificates used to secure your App traffic." + }, + { + "name": "Gateways", + "description": "Use the Gateways API to manage Gateway resources." + }, + { + "name": "Error Sets", + "description": "Use the Error Sets API to view the default predefined Error Sets." + }, + { + "name": "Published APIs", + "description": "Use the Published APIs API to manage your APIs by using the API Management module." + }, + { + "name": "Components", + "description": "Use the Components API to define child components (for example, microservices) for your Apps." + }, + { + "name": "Locations", + "description": "Use the Locations API to manage the deployment locations for NGINX Controller." + }, + { + "name": "Instance Templates", + "description": "Use the Instance Templates API to manage templates that can be used to deploy new NGINX Plus instances." + }, + { + "name": "Apps", + "description": "Use the Apps API to manage App resources." 
+ }, + { + "name": "Integrations", + "description": "Use the Integrations API to manage integrated cloud provider accounts." + }, + { + "name": "Policies", + "description": "Use the Policies API to manage the policies used to secure your App traffic." + } + ], + "paths": { + "/infrastructure/instance-groups": { + "get": { + "tags": [ + "Instance Groups" + ], + "summary": "List Instance Groups", + "description": "Returns an unfiltered list of all Instance Group resources.", + "operationId": "listInstanceGroups", + "responses": { + "200": { + "description": "Successfully retrieved a list of all the configured Instance Groups.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListInstanceGroupsResponse" + }, + "example": { + "value": { + "items": [ + { + "currentStatus": { + "instanceRefs": [], + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + }, + "desiredState": {}, + "metadata": { + "createTime": "2020-05-13T09:29:49.464273Z", + "description": "us-west-2 autoscale group", + "displayName": "aws-autoscale-group", + "kind": "instance-group", + "links": { + "rel": "/api/v1/infrastructure/instance-groups/amz-us-west-2-as-group" + }, + "name": "amz-us-west-2-as-group", + "uid": "802ef1f8-9105-474a-b9a7-599837efd6b4", + "updateTime": "2020-05-13T09:29:49.464273Z" + } + }, + { + "currentStatus": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/eks-cluster/instances/30dc361a3729" + }, + { + "ref": "/infrastructure/locations/eks-cluster/instances/01a9eead50e5" + } + ], + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + }, + "desiredState": {}, + 
"metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "K8S NGINX+ deployment", + "displayName": "k8s-nginx-deploy", + "kind": "instance-group", + "links": { + "rel": "/api/v1/infrastructure/instance-groups/k8s-nginx-deploy" + }, + "name": "k8s-nginx-deploy", + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + } + } + ] + } + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + } + } + }, + "post": { + "tags": [ + "Instance Groups" + ], + "summary": "Create an Instance Group", + "description": "Creates a new Instance Group resource.", + "operationId": "addInstanceGroup", + "requestBody": { + "description": "Defines the Instance Group resource to be added.", + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceGroup" + }, + "example": { + "value": { + "metadata": { + "name": "k8s-nginx-deploy", + "displayName": "K8S NGINX+ deployment", + "description": "k8s-nginx-deploy" + }, + "desiredState": {} + } + } + } + } + }, + "responses": { + "202": { + "description": "The Instance Group resource has been accepted for creation. 
The Instance Group will be created after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceGroupResponse" + }, + "example": { + "value": { + "metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "K8S NGINX+ deployment", + "displayName": "k8s-nginx-deploy", + "kind": "instance-group", + "links": { + "rel": "/api/v1/infrastructure/instance-groups/k8s-nginx-deploy" + }, + "name": "k8s-nginx-deploy", + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + }, + "desiredState": {}, + "currentStatus": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/eks-cluster/instances/30dc361a3729" + }, + { + "ref": "/infrastructure/locations/eks-cluster/instances/01a9eead50e5" + } + ], + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + } + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "409": { + "$ref": "#/components/responses/Conflict" + } + } + } + }, + "/infrastructure/instance-groups/{instanceGroupName}": { + "get": { + "tags": [ + "Instance Groups" + ], + "summary": "Get an Instance Group", + "description": "Returns information about a specified Instance Group resource.", + "operationId": "getInstanceGroup", + "parameters": [ + { + "$ref": "#/components/parameters/InstanceGroupName" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the requested Instance Group resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceGroupResponse" + }, + "example": { + "value": { + "metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "K8S NGINX+ deployment", + 
"displayName": "k8s-nginx-deploy", + "kind": "instance-group", + "links": { + "rel": "/api/v1/infrastructure/instance-groups/k8s-nginx-deploy" + }, + "name": "k8s-nginx-deploy", + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + }, + "desiredState": {}, + "currentStatus": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/eks-cluster/instances/30dc361a3729" + }, + { + "ref": "/infrastructure/locations/eks-cluster/instances/01a9eead50e5" + } + ], + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + } + } + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "404": { + "$ref": "#/components/responses/NotFound" + } + } + }, + "put": { + "tags": [ + "Instance Groups" + ], + "summary": "Upsert an Instance Group", + "description": "Creates a new Instance Group resource or updates an existing Instance Group resource.", + "operationId": "upsertInstanceGroup", + "parameters": [ + { + "$ref": "#/components/parameters/InstanceGroupName" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceGroup" + }, + "example": { + "value": { + "metadata": { + "name": "k8s-nginx-deploy", + "displayName": "K8S NGINX+ deployment", + "description": "k8s-nginx-deploy" + }, + "desiredState": {} + } + } + } + }, + "required": true + }, + "responses": { + "202": { + "description": "The Instance Group resource has been accepted for creation or update.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceGroupResponse" + }, + "example": { + "value": { + "metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "K8S NGINX+ deployment", + "displayName": "k8s-nginx-deploy", + "kind": 
"instance-group", + "links": { + "rel": "/api/v1/infrastructure/instance-groups/k8s-nginx-deploy" + }, + "name": "k8s-nginx-deploy", + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + }, + "desiredState": {}, + "currentStatus": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/eks-cluster/instances/30dc361a3729" + }, + { + "ref": "/infrastructure/locations/eks-cluster/instances/01a9eead50e5" + } + ], + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + } + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + } + } + }, + "delete": { + "tags": [ + "Instance Groups" + ], + "summary": "Delete an Instance Group", + "description": "Deletes the specified Instance Group resource.", + "operationId": "deleteInstanceGroup", + "parameters": [ + { + "$ref": "#/components/parameters/InstanceGroupName" + } + ], + "responses": { + "204": { + "description": "Successfully deleted the specified Instance Group resource." 
+ }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "404": { + "$ref": "#/components/responses/NotFound" + }, + "409": { + "$ref": "#/components/responses/Conflict" + } + } + } + }, + "/infrastructure/locations": { + "get": { + "tags": [ + "Locations" + ], + "summary": "List all Locations", + "description": "Returns a list of all Locations.", + "operationId": "listLocations", + "responses": { + "200": { + "description": "Successfully retrieved a list of all of the configured Locations.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListLocationResponse" + }, + "example": { + "value": { + "items": [ + { + "currentStatus": { + "type": "OTHER_LOCATION" + }, + "desiredState": { + "type": "OTHER_LOCATION" + }, + "metadata": { + "createTime": "2020-05-13T09:29:49.464273Z", + "description": "Location for instances where location has not been specified", + "displayName": "Unspecified (default)", + "kind": "location", + "links": { + "rel": "/api/v1/infrastructure/locations/unspecified" + }, + "name": "unspecified", + "tags": [ + "default" + ], + "uid": "802ef1f8-9105-474a-b9a7-599837efd6b4", + "updateTime": "2020-05-13T09:29:49.464273Z" + } + }, + { + "currentStatus": { + "type": "OTHER_LOCATION" + }, + "desiredState": { + "type": "OTHER_LOCATION" + }, + "metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "Other Location for managing instances", + "displayName": "OtherLocation-1", + "kind": "location", + "links": { + "rel": "/api/v1/infrastructure/locations/my-other-location" + }, + "name": "my-other-location", + "tags": [ + "dev", + "prod" + ], + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + } + } + ] + } + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + } + } + }, + "post": { + "tags": [ + "Locations" + ], + "summary": "Create a Location", + "description": "Creates a new Location resource.", + "operationId": 
"addLocation", + "requestBody": { + "description": "Defines the Location resource to be added.", + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Location" + }, + "examples": { + "OTHER_LOCATION": { + "$ref": "#/components/examples/OtherLocationRequest" + }, + "AWS_LOCATION": { + "$ref": "#/components/examples/AWSLocationRequest" + }, + "AZURE_LOCATION": { + "$ref": "#/components/examples/AzureLocationRequest" + } + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the requested Location.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetLocationResponse" + }, + "examples": { + "OTHER_LOCATION": { + "$ref": "#/components/examples/OtherLocationResponse" + }, + "AWS_LOCATION": { + "$ref": "#/components/examples/AWSLocationResponse" + }, + "AZURE_LOCATION": { + "$ref": "#/components/examples/AzureLocationResponse" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "405": { + "$ref": "#/components/responses/NotAllowed" + }, + "409": { + "$ref": "#/components/responses/Conflict" + } + } + } + }, + "/infrastructure/locations/{locationName}": { + "get": { + "tags": [ + "Locations" + ], + "summary": "Get a Location", + "description": "Returns information about a specified Location resource.", + "operationId": "getLocation", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the requested Location resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetLocationResponse" + }, + "examples": { + "OTHER_LOCATION": { + "$ref": "#/components/examples/OtherLocationResponse" + }, + "AWS_LOCATION": { + "$ref": "#/components/examples/AWSLocationResponse" + }, + "AZURE_LOCATION": { + "$ref": 
"#/components/examples/AzureLocationResponse" + } + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "404": { + "$ref": "#/components/responses/NotFound" + } + } + }, + "put": { + "tags": [ + "Locations" + ], + "summary": "Upsert a Location", + "description": "Creates a new Location resource or updates an existing Location resource.", + "operationId": "upsertLocation", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Location" + }, + "examples": { + "OTHER_LOCATION": { + "$ref": "#/components/examples/OtherLocationRequest" + }, + "AWS_LOCATION": { + "$ref": "#/components/examples/AWSLocationRequest" + }, + "AZURE_LOCATION": { + "$ref": "#/components/examples/AzureLocationRequest" + } + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successfully received the request to update the specified Location resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetLocationResponse" + }, + "examples": { + "OTHER_LOCATION": { + "$ref": "#/components/examples/OtherLocationResponse" + }, + "AWS_LOCATION": { + "$ref": "#/components/examples/AWSLocationResponse" + }, + "AZURE_LOCATION": { + "$ref": "#/components/examples/AzureLocationResponse" + } + } + } + } + }, + "201": { + "description": "Successfully updated the specified Location resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetLocationResponse" + }, + "examples": { + "OTHER_LOCATION": { + "$ref": "#/components/examples/OtherLocationResponse" + }, + "AWS_LOCATION": { + "$ref": "#/components/examples/AWSLocationResponse" + }, + "AZURE_LOCATION": { + "$ref": "#/components/examples/AzureLocationResponse" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": 
"#/components/responses/Unauthorized" + }, + "405": { + "$ref": "#/components/responses/NotAllowed" + } + } + }, + "delete": { + "tags": [ + "Locations" + ], + "summary": "Delete a Location", + "description": "Deletes the specified Location resource.", + "operationId": "deleteLocation", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + } + ], + "responses": { + "204": { + "description": "Successfully deleted the specified Location resource." + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "404": { + "$ref": "#/components/responses/NotFound" + }, + "405": { + "$ref": "#/components/responses/NotAllowed" + }, + "409": { + "$ref": "#/components/responses/Conflict" + } + } + } + }, + "/infrastructure/locations/{locationName}/instances": { + "get": { + "tags": [ + "Instances" + ], + "summary": "List all Instances in a Location", + "description": "Returns the status and metadata for all of the Instances in the specified Location.", + "operationId": "listInstances", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + } + ], + "responses": { + "200": { + "description": "Successfully returned the status and metadata for all of the Instances in the specified Location.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListInstanceResponse" + }, + "examples": { + "INSTANCES": { + "$ref": "#/components/examples/ListInstanceResponse" + } + } + } + } + } + } + }, + "post": { + "tags": [ + "Instances" + ], + "summary": "Create an Instance", + "description": "Creates a new Instance resource.", + "operationId": "createInstance", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceRequest" + }, + "examples": { + "AWS_INSTANCE": { + "$ref": "#/components/examples/AWSInstanceRequest" + }, + "AZURE_INSTANCE": { + "$ref": 
"#/components/examples/AzureInstanceRequest" + } + } + } + } + }, + "responses": { + "202": { + "description": "The Instance resource has been accepted for creation. The Instance will be created after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceResponse" + }, + "examples": { + "AWS_INSTANCE": { + "$ref": "#/components/examples/AWSInstance" + } + } + } + } + }, + "400": { + "description": "Bad input parameter or URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "message": "Error creating the instance: could not parse the request payload. Check the format of the request, then try again.", + "code": 120647 + } + } + } + }, + "409": { + "description": "The request failed due to a conflict with an existing Instance resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "message": "Error creating the instance: the instance already exists. 
Use a unique name for the instance, then try again.", + "code": 120652 + } + } + } + } + } + } + }, + "/infrastructure/locations/{locationName}/instances/{instanceName}": { + "get": { + "tags": [ + "Instances" + ], + "summary": "Get an Instance", + "description": "Returns the status and metadata for a single Instance.", + "operationId": "getInstance", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + }, + { + "$ref": "#/components/parameters/InstanceName" + } + ], + "responses": { + "200": { + "description": "Successfully returned the details for an Instance.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceResponse" + }, + "examples": { + "OTHER_INSTANCE": { + "$ref": "#/components/examples/OtherInstance" + }, + "AWS_INSTANCE": { + "$ref": "#/components/examples/AWSInstance" + }, + "AZURE_INSTANCE": { + "$ref": "#/components/examples/AzureInstance" + } + } + } + } + }, + "404": { + "description": "Instance not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "message": "Error getting the instance: the specified instance does not exist. 
Check the instance name, then try again.", + "code": 120603 + } + } + } + } + } + }, + "put": { + "tags": [ + "Instances" + ], + "summary": "Update an Instance", + "description": "Updates the description or display name of an existing Instance.", + "operationId": "updateInstance", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + }, + { + "$ref": "#/components/parameters/InstanceName" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceRequest" + }, + "examples": { + "OTHER_INSTANCE": { + "$ref": "#/components/examples/InstanceUpdateRequest" + }, + "AWS_INSTANCE": { + "$ref": "#/components/examples/InstanceUpdateRequest" + }, + "AZURE_INSTANCE": { + "$ref": "#/components/examples/InstanceUpdateRequest" + } + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the existing Instance.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceResponse" + }, + "examples": { + "OTHER_INSTANCE": { + "$ref": "#/components/examples/OtherInstance" + }, + "AWS_INSTANCE": { + "$ref": "#/components/examples/AWSInstance" + }, + "AZURE_INSTANCE": { + "$ref": "#/components/examples/AzureInstance" + } + } + } + } + }, + "400": { + "description": "Bad input parameter or URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "message": "Error updating the instance: could not parse the request payload. Check the format of the request, then try again.", + "code": 120612 + } + } + } + }, + "404": { + "description": "Instance not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "message": "Error updating the instance: the specified instance does not exist. 
Check the instance name, then try again.", + "code": 120614 + } + } + } + } + } + }, + "delete": { + "tags": [ + "Instances" + ], + "summary": "Delete an Instance", + "operationId": "deleteInstance", + "description": "Deletes the specified Instance.", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + }, + { + "$ref": "#/components/parameters/InstanceName" + } + ], + "responses": { + "202": { + "description": "Request for delete accepted", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceResponse" + } + } + } + }, + "204": { + "description": "Successfully deleted the Instance. No content is returned." + }, + "404": { + "description": "Instance not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "message": "Error deleting the instance: the specified instance does not exist. Check the instance name, then try again.", + "code": 120609 + } + } + } + }, + "409": { + "description": "Failed to delete the requested Instance resource.\n\nThe Instance is referenced by another resource. Remove the references to the Instance, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "message": "Error deleting the instance: the NGINX instance is being configured. Try again later. 
If the problem persists, contact the system administrator.", + "code": 120640 + } + } + } + } + } + } + }, + "/infrastructure/locations/{locationName}/instance-templates": { + "get": { + "tags": [ + "Instance Templates" + ], + "summary": "List Instance Templates", + "description": "Returns an unfiltered list of all Instance Template resources in the specified Location.", + "operationId": "listInstanceTemplates", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved a list of all Instance Template resources for the specified Location.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListInstanceTemplateResponse" + }, + "examples": { + "INSTANCE_TEMPLATES": { + "$ref": "#/components/examples/AWSListResponse" + } + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + } + } + }, + "post": { + "tags": [ + "Instance Templates" + ], + "summary": "Create an Instance Template", + "description": "Creates a new Instance Template resource.", + "operationId": "addInstanceTemplate", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + } + ], + "requestBody": { + "description": "Defines the Instance Template resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceTemplate" + }, + "examples": { + "AWS_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AWSRequest" + }, + "AZURE_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AzureRequestWithMarketplaceImageAndUsingExistingNic" + } + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the requested Instance Template resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceTemplateResponse" + }, + "examples": { + "AWS_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AWSResponse" + }, + "AZURE_INSTANCE_TEMPLATE": { + 
"$ref": "#/components/examples/AzureResponse" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "409": { + "$ref": "#/components/responses/Conflict" + } + } + } + }, + "/infrastructure/locations/{locationName}/instance-templates/{instanceTemplateName}": { + "get": { + "tags": [ + "Instance Templates" + ], + "summary": "Get an Instance Template", + "description": "Gets information for the specified Instance Template resource.", + "operationId": "getInstanceTemplate", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + }, + { + "$ref": "#/components/parameters/InstanceTemplateName" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the requested Instance Template resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceTemplateResponse" + }, + "examples": { + "AWS_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AWSResponse" + }, + "AZURE_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AzureResponse" + } + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "404": { + "$ref": "#/components/responses/NotFound" + } + } + }, + "put": { + "tags": [ + "Instance Templates" + ], + "summary": "Upsert an Instance Template", + "description": "Creates a new Instance Template resource or updates an existing Instance Template resource.", + "operationId": "upsertInstanceTemplate", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + }, + { + "$ref": "#/components/parameters/InstanceTemplateName" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceTemplate" + }, + "examples": { + "AWS_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AWSRequest" + }, + "AZURE_INSTANCE_TEMPLATE": { + "$ref": 
"#/components/examples/AzureRequestWithCustomImageAndCreatingNewNicAndPublicIP" + } + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successfully updated the specified Instance Template resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceTemplateResponse" + }, + "examples": { + "AWS_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AWSResponse" + }, + "AZURE_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AzureResponse" + } + } + } + } + }, + "201": { + "description": "Successfully created the requested Instance Template resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetInstanceTemplateResponse" + }, + "examples": { + "AWS_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AWSResponse" + }, + "AZURE_INSTANCE_TEMPLATE": { + "$ref": "#/components/examples/AzureResponse" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "409": { + "$ref": "#/components/responses/Conflict" + } + } + }, + "delete": { + "tags": [ + "Instance Templates" + ], + "summary": "Delete an Instance Template", + "description": "Deletes the specified Instance Template resource.", + "operationId": "deleteInstanceTemplate", + "parameters": [ + { + "$ref": "#/components/parameters/LocationName" + }, + { + "$ref": "#/components/parameters/InstanceTemplateName" + } + ], + "responses": { + "204": { + "description": "Successfully deleted the specified Instance Template resource." 
+ }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "404": { + "$ref": "#/components/responses/NotFound" + } + } + } + }, + "/platform/integrations": { + "get": { + "tags": [ + "Integrations" + ], + "summary": "List all Integrations", + "description": "Returns an unfiltered list of account Integrations.", + "operationId": "listIntegrations", + "x-f5-experimental": true, + "responses": { + "200": { + "description": "Successfully retreived all Integration accounts.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListIntegrationResponse" + } + } + } + } + } + }, + "post": { + "tags": [ + "Integrations" + ], + "summary": "Create an Integration account", + "description": "Creates a new Integration account.", + "operationId": "addIntegration", + "x-f5-experimental": true, + "requestBody": { + "description": "Defines the Integration account to be added.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Integration" + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the requested Integration.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetIntegrationResponse" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a conflict with an existing Integration.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/platform/integrations/{integrationName}": { + "get": { + "tags": [ + "Integrations" + ], + "summary": "Get an Integration account", + "description": "Gets information about a specific Integration account.", + "operationId": "getIntegration", + "x-f5-experimental": true, + "parameters": [ + { + "$ref": "#/components/parameters/IntegrationName" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the requested Integration account.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetIntegrationResponse" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "Integration not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Integrations" + ], + "summary": "Update an Integration account", + "description": "Updates an Integration account.", + "operationId": "updateIntegration", + "x-f5-experimental": true, + "parameters": [ + { + "$ref": "#/components/parameters/IntegrationName" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Integration" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successfully received the request to update the specified Integration account.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetIntegrationResponse" + } + } + } + }, + "201": { + "description": "Successfully updated the specified Integration account.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetIntegrationResponse" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Integrations" + ], + "summary": "Delete an Integration account", + "description": "Deletes the specified Integration account resource.", + "operationId": "deleteIntegration", + "x-f5-experimental": true, + "parameters": [ + { + "$ref": "#/components/parameters/IntegrationName" + } + ], + "responses": { + "204": { + "description": "Successfully deleted the specified Integration resource." + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/security/identity-providers": { + "get": { + "tags": [ + "Identity Providers" + ], + "summary": "List Identity Providers", + "description": "Returns a list of all Identity Provider resources.\n\n> **Note:** These resources were known as Client Groups in pre-3.x versions of NGINX Controller.\n", + "operationId": "identityProvidersSearch", + "responses": { + "200": { + "description": "Successfully retrieved a list of Identity Provider resources.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/security/identity-providers/{identityProviderName}": { + "get": { + "tags": [ + "Identity Providers" + ], + "summary": "Get an Identity Provider", + "description": "Returns the specified Identity Provider resource.", + "operationId": "identityProvidersGet", + "responses": { + "200": { + "description": "Successfully returnd the specified Identity Provider resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProvider" + } + } + } + }, + "403": { + "description": "The request failed due to insufficient privileges.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Identity Provider resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "500": { + "description": "The request cannot be processed because of an internal server error.", + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Identity Providers" + ], + "summary": "Upsert an Identity Provider", + "description": "Creates a new Identity Provider, or creates an existing Identity Provider resource.\n", + "operationId": "identityProvidersPut", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProvider" + }, + "examples": { + "IdentityProviderRequest": { + "$ref": "#/components/examples/IdentityProviderRequest" + } + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Identity Provider resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProvider" + } + } + } + }, + "201": { + "description": "Successfully created the requested Identity Provider resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProvider" + } + } + } + }, + "202": { + "description": "Successfully accepted the requested Identity Provider resource and is currently processing it.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProvider" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to update or create an Identity Provider resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Identity Providers" + ], + "summary": "Delete an Identity Provider", + "description": "Deletes the specified Identity Provider resource.", + "operationId": "identityProvidersDelete", + "responses": { + "202": { + "description": "The Identity Provider resource has been marked for deletion. The resource will be deleted after the publish/cleanup succeeds.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProvider" + } + } + } + }, + "204": { + "description": "Successfully deleted the specified Identity Provider resource. No content is returned." + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Identity Provider resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to delete an Identity Provider resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "500": { + "description": "The request cannot be processed because of an internal server error.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/identityProviderName" + } + ] + }, + "/security/identity-providers/{identityProviderName}/clients": { + "get": { + "tags": [ + "Identity Provider Clients" + ], + "summary": "List Identity Provider Clients", + "description": "Returns a list of all Identity Provider Client resources.\n", + "operationId": "identityProviderClientsSearch", + "responses": { + "200": { + "description": "Successfully retrieved a list of Identity Provider Client resources.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClientList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Identity Provider resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Identity Provider Clients" + ], + "summary": "Create Multiple Identity Provider Clients", + "description": "Creates or updates multiple Identity Provider Client resources.\n", + "operationId": "identityProviderClientsPutList", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClientList" + }, + "examples": { + "IdentityProviderClientListRequest": { + "$ref": "#/components/examples/IdentityProviderClientListRequest" + } + } + } + } + }, + "responses": { + "200": { + "description": "Successfully upserted the Identity Provider Client resources.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClientList" + } + } + } + }, + "202": { + "description": "Successfully accepted the requested Identity Provider Client resources and is currently processing it.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClientList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Identity Provider resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The Identity Provider Client resource conflicts with another resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/identityProviderName" + } + ] + }, + "/security/identity-providers/{identityProviderName}/clients/{identityProviderClientName}": { + "get": { + "tags": [ + "Identity Provider Clients" + ], + "summary": "Get an Identity Provider Client", + "description": "Returns information for the specified Identity Provider Client resource.", + "operationId": "identityProviderClientsGet", + "responses": { + "200": { + "description": "Successfully returned the specified Identity Provider Client resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClient" + } + } + } + }, + "404": { + "description": "The specified Identity Provider Client resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Identity Provider Clients" + ], + "summary": "Update an Identity Provider Client", + "description": "Updates the specified Identity Provider Client resource.", + "operationId": "identityProviderClientsPut", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClient" + }, + "examples": { + "IdentityProviderClientRequest": { + "$ref": "#/components/examples/IdentityProviderClientRequest" + } + } + } + } + }, + "responses": { + 
"200": { + "description": "Successfully updated the specified Identity Provider Client resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClient" + } + } + } + }, + "201": { + "description": "Successfully created the specified Identity Provider Client resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClient" + } + } + } + }, + "202": { + "description": "Successfully accepted the requested Identity Provider Client resource and is currently processing it.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClient" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Identity Provider resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to update or create an Identity Provider Client resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "patch": { + "tags": [ + "Identity Provider Clients" + ], + "summary": "Update an Identity Provider Client", + "description": "Updates the specified Identity Provider Client resource.", + "operationId": "identityProviderClientsPatch", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateIdentityProviderClient" + }, + "examples": { + "IdentityProviderClientPatchMetadataRequest": { + "$ref": 
"#/components/examples/IdentityProviderClientPatchMetadataRequest" + }, + "IdentityProviderClientPatchDesiredStateRequest": { + "$ref": "#/components/examples/IdentityProviderClientPatchDesiredStateRequest" + } + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Identity Provider Client resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClient" + } + } + } + }, + "202": { + "description": "Successfully accepted the request to update an Identity Provider Client resource and is currently processing it.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClient" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Identity Provider Client resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to update an Identity Provider Client resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Identity Provider Clients" + ], + "summary": "Delete an Identity Provider Client", + "description": "Deletes the specified Identity Provider Client resource.", + "operationId": "identityProviderClientsDelete", + "responses": { + "202": { + "description": "Identity Provider Client resource has been marked for deletion. 
The resource will be\ndeleted after the publish/cleanup succeeds.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderClient" + } + } + } + }, + "204": { + "description": "Successfully deleted the specified Identity Provider Client resource. No content is returned." + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Identity Provider Client resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to delete an Identity Provider Client resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/identityProviderName" + }, + { + "$ref": "#/components/parameters/identityProviderClientName" + } + ] + }, + "/services": { + "x-f5-experimental": true, + "get": { + "tags": [ + "Services" + ], + "summary": "List the metadata for all instances of the desired resource.", + "description": "Returns a metadata list of the requested resource. The resources that can be queried is currently restricted to published-apis.\n", + "operationId": "listResources", + "responses": { + "200": { + "description": "Successfully retrieved a metadata list of the requested resource.", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceMeta" + } + } + } + } + }, + "400": { + "description": "The resource defined in the query parameters could not be found or is not yet supported. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The environment defined in the query parameters could not be found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "resource", + "in": "query", + "description": "Desired resource to list.", + "required": true, + "style": "form", + "explode": false, + "schema": { + "type": "string", + "enum": [ + "published-apis" + ] + } + }, + { + "name": "environment", + "in": "query", + "description": "Filter desired resource based on the environment.", + "required": false, + "style": "form", + "explode": false, + "schema": { + "type": "string" + } + } + ] + }, + "/services/api-definitions": { + "get": { + "tags": [ + "API Definitions" + ], + "summary": "List API Definitions", + "description": "Returns a list of API Definition resources.", + "operationId": "apiDefinitionsSearch", + "responses": { + "200": { + "description": "Successfully retrieved a list of all API Definitions.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionList" + } + } + } + } + } + } + }, + "/services/api-definitions/{apiDefinitionName}": { + "get": { + "tags": [ + "API Definitions" + ], + "summary": "Get an API Definition", + "description": "Gets information about a specified API Definition.\n", + "operationId": "apiDefinitionsGet", + "responses": { + "200": { + "description": "Successfully returned the specified API Definition resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinition" + } + } + } + }, + "404": { + "description": "The specified API Definition resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { 
+ "tags": [ + "API Definitions" + ], + "summary": "Upsert an API Definition", + "description": "Creates a new API Definition or updates an existing API Definition resource.\n", + "operationId": "apiDefinitionsPut", + "requestBody": { + "description": "Defines the API Definition resource to create, or the updates to apply to an existing API Definition resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinition" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified API Definition resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinition" + } + } + } + }, + "201": { + "description": "Successfully created the requested API Definition resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinition" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "API Definitions" + ], + "summary": "Delete an API Definition", + "description": "Deletes the specified API Definition resource.", + "operationId": "apiDefinitionsDelete", + "responses": { + "204": { + "description": "Successfully deleted the requested API Definition resource. No content is returned.\n" + }, + "404": { + "description": "The specified API Definition resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "Failed to delete the specified API Definition resource. 
Delete any referenced Published APIs or remove the references, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/apiDefinitionName" + } + ] + }, + "/services/api-definitions/{apiDefinitionName}/versions": { + "get": { + "tags": [ + "API Definition Versions" + ], + "summary": "List API Definition Versions", + "description": "Lists all Versions for the specified API Definition resource.\n", + "operationId": "definitionVersionsSearch", + "responses": { + "200": { + "description": "Successfully retrieved the list of API Definition Version resources.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersionList" + } + } + } + }, + "404": { + "description": "The specified API Definition resource was not found or does not contain any Versions.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "API Definition Versions" + ], + "summary": "Add an API Definition Version", + "description": "Creates a single new API Definition Version resource or multiple Version resources.\n", + "operationId": "definitionVersionsPutList", + "requestBody": { + "description": "Creates, updates, or deletes an API Definition Version resource. It interacts with a list of items.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersionList" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully received the request to create the API Definition Version resource(s).", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersionList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "Failed to delete the specified API Definition Version resource(s) as it has at least one dependent Published API. Delete the referenced Published API(s), then try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "API Definition Versions" + ], + "summary": "Delete API Definition Versions", + "description": "Deletes all Versions for the specified API Definition resource.\n", + "operationId": "definitionVersionsDeleteList", + "responses": { + "204": { + "description": "Successfully deleted the Versions for the requested API Definition resource. No content is returned.\n" + }, + "404": { + "description": "The specified API Definition resource was not found or does not contain any Versions.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "Failed to delete the Versions for the specified API Definition resource. 
Delete or remove any references to Published APIs, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/apiDefinitionName" + } + ] + }, + "/services/api-definitions/{apiDefinitionName}/versions/{version}": { + "get": { + "tags": [ + "API Definition Versions" + ], + "summary": "Get an API Definition Version", + "description": "Gets information about an API Definition Version resource.\n", + "operationId": "definitionVersionsGet", + "responses": { + "200": { + "description": "Successfully returned the specified API Definition Version resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersion" + } + } + } + }, + "404": { + "description": "The specified API Definition Version resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "API Definition Versions" + ], + "summary": "Upsert an API Definition Version", + "description": "Creates a single new Version resource or updates an existing API Definition Version resource.\n", + "operationId": "definitionVersionsPut", + "requestBody": { + "description": "Creates or updates an API Definition Version resource", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersion" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified API Definition Version resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersion" + } + } + } + }, + "201": { + "description": "Successfully created the requested API Definition Version resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersion" + } + } + } + }, + "400": { + 
"description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "API Definition Versions" + ], + "summary": "Delete an API Definition Version", + "description": "Deletes an API Definition Version resource.\n", + "operationId": "definitionVersionsDelete", + "responses": { + "204": { + "description": "Successfully deleted the API Definition Version resource.\n" + }, + "404": { + "description": "The specified API Definition Version resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "Failed to delete the specified API Definition resource. Delete or remove any references to Published APIs, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/apiDefinitionName" + }, + { + "$ref": "#/components/parameters/versionName" + } + ] + }, + "/services/api-definitions/{apiDefinitionName}/versions/{version}/import": { + "put": { + "tags": [ + "API Definition Version Import" + ], + "summary": "Import an API Definition Version", + "description": "Imports an API spec to the specified Version of an API Definition.\n\nUse this endpoint to import a raw API specification to define your API.\n\n- This endpoint accepts a valid OpenAPI 3 spec, formatted as valid JSON or YAML.\n- The file provided for import will be validated against the\n [OAS v3 schema](https://github.com/OAI/OpenAPI-Specification/blob/master/schemas/v3.0/schema.yaml).\n- You must specify a \"Content-Type\" header when importing an API spec.\n The endpoint accepts the following \"Content-Type\" values:\n\n - application/json\n - application/yaml\n - text/x-yaml\n - 
application/x-yaml\n - text/yaml\n - application/xml\n - text/xml\n", + "operationId": "definitionVersionsImport", + "requestBody": { + "content": { + "application/json": { + "schema": { + "type": "object" + } + }, + "application/yaml": { + "schema": { + "type": "string" + } + }, + "text/x-yaml": { + "schema": { + "type": "string" + } + }, + "application/x-yaml": { + "schema": { + "type": "string" + } + }, + "text/yaml": { + "schema": { + "type": "string" + } + }, + "application/xml": { + "schema": { + "type": "string" + } + }, + "text/xml": { + "schema": { + "type": "string" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully completed the API Version Import request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersion" + } + } + } + }, + "201": { + "description": "Successfully created the requested API Definition Version resource from the spec provided.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIDefinitionVersion" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "415": { + "description": "The request body contains an unsupported content type.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/apiDefinitionName" + }, + { + "$ref": "#/components/parameters/versionName" + } + ] + }, + "/services/environments": { + "get": { + "tags": [ + "Environments" + ], + "summary": "List all Environments", + "description": "Returns a list of all Environment resources.\n", + "operationId": "listEnvironments", + "responses": { + "200": { + "description": "Successfully retrieved a list of all Environment resources.", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/EnvironmentList" + } + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "post": { + "tags": [ + "Environments" + ], + "summary": "Create an Environment", + "description": "Creates a new Environment resource.\n", + "operationId": "createEnvironment", + "requestBody": { + "description": "Defines the Environment resource to create.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the specified Environment.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "202": { + "description": "The Environment resource has been accepted for creation. 
The Environment will be created after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a naming conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/services/environments/{environmentName}": { + "get": { + "tags": [ + "Environments" + ], + "summary": "Get an Environment", + "description": "Returns information for the specified Environment.\n", + "operationId": "getEnvironment", + "responses": { + "200": { + "description": "Successfully returned information for the specified Environment resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Environments" + ], + "summary": "Upsert an Environment", + "description": "Creates a new Environment or updates an existing Environment resource.\n", + "operationId": "updateEnvironment", + "requestBody": { + "description": "Defines the Environment to create or the updates to make to an existing Environment.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Environment resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "201": { + "description": "Successfully created the specified Environment resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "202": { + "description": "The Environment resource has been accepted for creation or update. The Environment will be created or updated after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Environments" + ], + "summary": "Delete an Environment", + "description": "Deletes the specified Environment resource.", + "operationId": "deleteEnvironment", + "responses": { + "202": { + "description": "The Environment resource has been marked for deletion. The Environment will be deleted after the underlying resources have been deleted.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "204": { + "description": "Successfully deleted the specified Environment resource. No content returned.\n" + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "Failed to delete the requested Environment resource.\n\nThe Environment contains references to other objects. 
Delete the referenced objects or remove the references, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + } + ] + }, + "/services/environments/{environmentName}/apps": { + "get": { + "tags": [ + "Apps" + ], + "summary": "List all Apps", + "description": "Returns a list of all App resources.\n", + "operationId": "listApps", + "responses": { + "200": { + "description": "Successfully retrieved a list of all App resources.", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AppList" + } + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "post": { + "tags": [ + "Apps" + ], + "summary": "Create an App", + "description": "Creates a new App resource.", + "operationId": "createApp", + "requestBody": { + "description": "An App.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the specified App resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "202": { + "description": "The App resource has been accepted for creation. 
The App will be created after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a naming conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the App.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + } + ] + }, + "/services/environments/{environmentName}/apps/{appName}": { + "get": { + "tags": [ + "Apps" + ], + "summary": "Get an App", + "description": "Gets the information for a specific App resource.\n", + "operationId": "getApp", + "responses": { + "200": { + "description": "Successfully retrieved information for the requested App resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Apps" + ], + "summary": "Upsert an App", + "description": "Creates a new App resource or updates an existing App resource.\n", + "operationId": "updateApp", + "requestBody": { + "description": "Defines the App resource to create or update.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified App resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "201": { + "description": "Successfully created the specified App resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "202": { + "description": "The App resource has been accepted for creation or update. The App will be created or updated after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Apps" + ], + "summary": "Delete an App", + "description": "Deletes the specified App resource.\n\nYou must delete all of an App's child resources before you delete the App.\n", + "operationId": "deleteApp", + "responses": { + "202": { + "description": "The App resource has been marked for deletion. 
The App will be deleted after the underlying resources have been deleted.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "204": { + "description": "Successfully deleted the specified App resource.\n" + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request to delete the specified App resource failed.\nThe App contains references to active objects and cannot be deleted. Delete the child objects or remove the references, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the App.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + }, + { + "name": "appName", + "description": "The name of the App.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/AppName" + } + } + ] + }, + "/services/environments/{environmentName}/apps/{appName}/components": { + "get": { + "tags": [ + "Components" + ], + "summary": "List all Components", + "description": "Returns a list of all of the Component resources that are contained by the specified App.\n", + "operationId": "listAppComponents", + "responses": { + "200": { + "description": "Successfully retrieved a list of Component resources.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ComponentList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "post": { + "tags": [ + "Components" + ], + "summary": "Create a Component", + "description": "Creates a new Component resource.\n", + "operationId": "createAppComponent", + "requestBody": { + "description": "Defines the Component resource to create.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + }, + "examples": { + "ComponentRequest": { + "$ref": "#/components/examples/ComponentRequest" + } + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the requested Component resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + } + } + } + }, + "202": { + "description": "The Component resource has been accepted for creation. The Component will be created after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to create a Component resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the Component's parent App.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + }, + { + "name": "appName", + "description": "The name of the App that contains the Component resource.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/AppName" + } + } + ] + }, + "/services/environments/{environmentName}/apps/{appName}/components/{componentName}": { + "get": { + "tags": [ + "Components" + ], + "summary": "Get a Component", + "description": "Returns information for the specified Component.\n", + "operationId": "getAppComponent", + "responses": { + "200": { + "description": "Successfully returned the requested Component resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Components" + ], + "summary": "Upsert a Component", + "description": "Creates a new Component or updates an existing Component resource.\n", + "operationId": "updateAppComponent", + "requestBody": { + "description": "Defines the Component resource to create or update.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + }, + "examples": { + "ComponentRequest": { + "$ref": "#/components/examples/ComponentRequest" + } + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Component resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + } + } + } + }, + "201": { + "description": "Successfully created the specified Component resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + } + } + } + }, + "202": { + "description": "The Component resource has been accepted for creation or update. The Component will be created or updated after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to update or create a Component resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Components" + ], + "summary": "Delete a Component", + "description": "Deletes the specified Component resource.", + "operationId": "deleteAppComponent", + "responses": { + "202": { + "description": "Component resource has been marked for deletion. The resource will be\ndeleted after the underlying resources have been freed.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Component" + } + } + } + }, + "204": { + "description": "Successfully deleted the specified Component. No content returned.\n" + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to delete a Component resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the Component's parent App.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + }, + { + "name": "appName", + "description": "The name of the App that contains the Component resource.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/AppName" + } + }, + { + "name": "componentName", + "description": "The name of the Component resource.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/ComponentName" + } + } + ] + }, + "/services/environments/{environmentName}/apps/{appName}/published-apis": { + "get": { + "tags": [ + "Published APIs" + ], + "summary": "List Published APIs", + "description": "Returns a list of all Published APIs for the specified Environment and App.\n", + "operationId": "listPublishedAPIs", + "responses": { + "200": { + "description": "Successfully retrieved the list of Published APIs for the specified Environment.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PublishedAPIList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/environmentName" + }, + { + "$ref": "#/components/parameters/appName" + } + ] + }, + "/services/environments/{environmentName}/apps/{appName}/published-apis/{publishedApiName}": { + "get": { + "tags": [ + "Published APIs" + ], + "summary": "Get a Published API", + "description": "Gets information about the specified Published API.\n", + "operationId": "getPublishedAPI", + "responses": { + "200": { + "description": "Successfully retrieved the specified Published API resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PublishedAPI" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Published API resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Published APIs" + ], + "summary": "Upsert a Published API", + "description": "Creates a new Published API or updates an existing Published API resource.", + "operationId": "upsertPublishedAPI", + "requestBody": { + "description": "Defines the Published API to create, or the updates to apply to an existing Published API resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PublishedAPI" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Published API resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PublishedAPI" + } + } + } + }, + "201": { + "description": "Successfully 
created the requested Published API resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PublishedAPI" + } + } + } + }, + "202": { + "description": "The request to create a Published API succeeded. The resource will be created when the configuration is complete.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PublishedAPI" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Published APIs" + ], + "summary": "Delete a Published API", + "description": "Deletes the specified Published API resource.", + "operationId": "deletePublishedAPI", + "responses": { + "202": { + "description": "Published API has been marked for deletion. The resource will be\ndeleted after the publish/cleanup succeeds.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PublishedAPI" + } + } + } + }, + "204": { + "description": "Successfully deleted the specified Published API resource. No content is returned." + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The specified Published API resource was not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a naming conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "$ref": "#/components/parameters/environmentName" + }, + { + "$ref": "#/components/parameters/appName" + }, + { + "$ref": "#/components/parameters/publishedApiName" + } + ] + }, + "/services/environments/{environmentName}/certs": { + "get": { + "tags": [ + "Certs" + ], + "summary": "List all Certs", + "description": "Returns a list of Cert metadata objects for all of the Certs in the specified environment.", + "operationId": "listCerts", + "responses": { + "200": { + "description": "Successfully retrieved a list of Certs for the specified Environment.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CertList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "post": { + "tags": [ + "Certs" + ], + "summary": "Create a Cert", + "operationId": "createCert", + "description": "Creates a new Cert resource in the specified Environment.\n", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Cert" + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the specified Cert resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CertStatus" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a naming conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the Cert.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + } + ] + }, + "/services/environments/{environmentName}/certs/{certName}": { + "get": { + "tags": [ + "Certs" + ], + "summary": "Get a Cert", + "operationId": "getCert", + "description": "Returns information for a specific Cert resource.", + "responses": { + "200": { + "description": "Successfully retrieved the requested Cert.", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/CertStatus" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Certs" + ], + "summary": "Upsert a Cert", + "operationId": "updateCert", + "description": "Creates a new Cert or updates an existing Cert resource.", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Cert" + } + } + } + }, + "responses": { + "200": { + "description": "Sucessfully updated the specified Cert resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CertStatus" + } + } + } + }, + "201": { + "description": "Successfully created the requested Cert resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CertStatus" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Certs" + ], + "summary": "Delete a Cert", + "operationId": "deleteCert", + "description": "Deletes the specified Cert resource.", + "responses": { + "204": { + "description": "The specified Cert resource was successfully deleted." 
+ }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request to delete the specified Cert resource failed.\nThe Cert is referenced by active objects and cannot be deleted. Delete the referencing objects or remove the references, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the Cert.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + }, + { + "name": "certName", + "description": "The name of the Cert.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/ResourceName" + } + } + ] + }, + "/services/environments/{environmentName}/devportals": { + "get": { + "tags": [ + "DevPortals" + ], + "summary": "List all DevPortals", + "description": "Returns a list of all DevPortal resources.\n", + "operationId": "ListDevPortals", + "responses": { + "200": { + "description": "Successfully retrieved a list of all DevPortals resources.", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DevPortalsList" + } + } + } + } + }, + "400": { + "description": "Illegal input parameter or malformed URI specified. 
Check for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "post": { + "tags": [ + "DevPortals" + ], + "summary": "Create DevPortal", + "description": "Creates a new Dev Portal resource.", + "operationId": "CreateDevPortal", + "requestBody": { + "description": "A Dev Portal.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the specified Dev Portal resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "202": { + "description": "Dev Portal resource has been accepted for creation. A Dev Portal will be created after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "400": { + "description": "Illegal input parameter or malformed URI specified. Check for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a naming conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the DevPortal.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + } + ] + }, + "/services/environments/{environmentName}/devportals/{devPortalName}": { + "get": { + "tags": [ + "DevPortals" + ], + "summary": "Get Dev Portal", + "description": "Gets the information for a specific Dev Portal resource.\n", + "operationId": "GetDevPortal", + "responses": { + "200": { + "description": "Successfully retrieved information for the requested Dev Portal resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "400": { + "description": "Illegal input parameter or malformed URI specified. Check for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "DevPortals" + ], + "summary": "Upsert Dev Portal", + "description": "Creates a new Dev Portal resource or updates an existing Dev Portal resource.\n", + "operationId": "UpsertDevPortal", + "requestBody": { + "description": "Defines a Dev Portal resource to create or update.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Dev Portal resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "201": { + "description": "Successfully created the specified Dev Portal resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "202": { + "description": "The Dev Portal resource has been accepted for creation or update. Dev Portal will be created or updated after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "400": { + "description": "Illegal input parameter or malformed URI specified. Check for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "DevPortals" + ], + "summary": "Delete a DevPortal", + "description": "Deletes the specified Dev Portal resource.\nYou must delete all of a Dev Portal's child resources before you delete the Dev Portal.\n", + "operationId": "DeleteDevPortal", + "responses": { + "202": { + "description": "The DevPortal resource has been marked for deletion. DevPortal will be deleted after the underlying resources have been deleted.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "204": { + "description": "Successfully deleted the specified DevPortal resource.\n" + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request to delete the specified App resource failed.\nThe App contains references to active objects and cannot be deleted. 
Delete the child objects or remove the references, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the DevPortal.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + }, + { + "name": "devPortalName", + "description": "The name of the DevPortal.", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ] + }, + "/services/environments/{environmentName}/gateways": { + "get": { + "tags": [ + "Gateways" + ], + "summary": "List all Gateways", + "description": "Returns a list of all Gateways in the specified Environment.\n", + "operationId": "listGateways", + "responses": { + "200": { + "description": "Successfully retrieved a list of all Gateways for the specified Environment.", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/GatewayList" + } + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "post": { + "tags": [ + "Gateways" + ], + "summary": "Create a Gateway", + "description": "Creates a new Gateway resource.\n", + "operationId": "createGateway", + "requestBody": { + "description": "Defines the Gateway resource to create.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the specified Gateway resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "202": { + "description": "The Gateway resource has been accepted for creation. The Gateway will be created after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a naming conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to create a Gateway resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the Gateway resource.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + } + ] + }, + "/services/environments/{environmentName}/gateways/{gatewayName}": { + "get": { + "tags": [ + "Gateways" + ], + "summary": "Get a Gateway", + "description": "Returns information for the specified Gateway resource.\n", + "operationId": "getGateway", + "responses": { + "200": { + "description": "Successfully retrieved the requested Gateway resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "tags": [ + "Gateways" + ], + "summary": "Upsert a Gateway", + "description": "Creates a new Gateway or updates an existing Gateway resource.\n", + "operationId": "updateGateway", + "requestBody": { + "description": "Defines the Gateway resource to create or the updates to make to an existing Gateway resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Gateway resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "201": { + "description": "Successfully created the specified Gateway resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "202": { + "description": "The Gateway resource has been accepted for creation or update. The Gateway will be created or updated after it is done configuring.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to update or create a Gateway resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Gateways" + ], + "summary": "Delete a Gateway", + "description": "Deletes the specified Gateway resource.", + "operationId": "deleteGateway", + "responses": { + "202": { + "description": "The Gateway resource has been marked for deletion. The Gateway will be deleted after the underlying resources have been deleted.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "204": { + "description": "Successfully deleted the specified Gateway resource. No content is returned.\n" + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request to delete a Gateway resource failed.\nThe Gateway is referenced by an App Component(s) and cannot be deleted.\nDelete the App Component or remove the reference, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "429": { + "description": "The request to delete a Gateway resource failed due to exceeding request processing threshold.\nAllow the server to process existing requests, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "environmentName", + "description": "The name of the Environment that contains the Gateway resource.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + }, + { + "name": "gatewayName", + "description": "The name of the Gateway.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/GatewayName" + } + } + ] + }, + "/services/errorsets": { + "get": { + "tags": [ + "Error Sets" + ], + "summary": "List all Error Sets.", + "description": "Returns a list of all the Error Sets.", + "operationId": "listErrorSets", + "responses": { + "200": { + "description": "Successfully retrieved a list of Error Sets.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorSetList" + } + } + } + } + } + } + }, + "/services/errorsets/{errorSetName}": { + "get": { + "tags": [ + "ErrorSets" + ], + "summary": "Get an Error Set.", + "operationId": "getErrorSet", + "description": "Returns the information for a specific Error Set.", + "responses": { + "200": { + "description": "Successfully retrieved the requested Error Set.", + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorSet" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "errorSetName", + "description": "The name of the Error Set.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/ResourceName" + } + } + ] + }, + "/security/policies": { + "x-f5-experimental": true, + "get": { + "x-f5-experimental": true, + "tags": [ + "Policies" + ], + "summary": "List all Policies", + "description": "Returns a list of Policy metadata objects for all of the Policies.", + "operationId": "listPolicies", + "responses": { + "200": { + "description": "Successfully retrieved a list of Policies.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PolicyList" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. 
Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "post": { + "x-f5-experimental": true, + "tags": [ + "Policies" + ], + "summary": "Create a Policy", + "operationId": "createPolicy", + "description": "Creates a new Policy resource.\n", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Policy" + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the specified Policy resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PolicyStatus" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request failed due to a naming conflict with an existing resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + } + }, + "/security/policies/{policyName}": { + "get": { + "x-f5-experimental": true, + "tags": [ + "Policies" + ], + "summary": "Get a Policy", + "operationId": "getPolicy", + "description": "Returns information for a specific Policy resource.", + "responses": { + "200": { + "description": "Successfully retrieved the requested Policy.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PolicyStatus" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "put": { + "x-f5-experimental": true, + "tags": [ + "Policies" + ], + "summary": "Upsert a Policy", + "operationId": "updatePolicy", + "description": "Creates a new Policy or updates an existing Policy resource.", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Policy" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Policy resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PolicyStatus" + } + } + } + }, + "201": { + "description": "Successfully created the requested Policy resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PolicyStatus" + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "delete": { + "x-f5-experimental": true, + "tags": [ + "Policies" + ], + "summary": "Delete a Policy", + "operationId": "deletePolicy", + "description": "Deletes the specified Policy resource.", + "responses": { + "204": { + "description": "The specified Policy resource was successfully deleted." 
+ }, + "404": { + "description": "The resource defined in the URI could not be found. Check the URI for errors and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "409": { + "description": "The request to delete the specified Policy resource failed.\nThe Policy is referenced by active objects and cannot be deleted. Delete the referencing objects or remove the references, then try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + } + } + }, + "parameters": [ + { + "name": "policyName", + "description": "The name of the Policy.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/ResourceName" + } + } + ] + } + }, + "components": { + "parameters": { + "LocationName": { + "name": "locationName", + "in": "path", + "description": "The name of the Location that contains the Instance.", + "required": true, + "style": "simple", + "explode": false, + "schema": { + "type": "string" + } + }, + "InstanceName": { + "name": "instanceName", + "in": "path", + "description": "The name of the Instance.", + "required": true, + "style": "simple", + "explode": false, + "schema": { + "type": "string" + } + }, + "environmentName": { + "name": "environmentName", + "description": "The name of the Environment that contains the Component's parent App.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/EnvironmentName" + } + }, + "appName": { + "name": "appName", + "description": "The name of the App that contains the Component resource.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/AppName" + } + }, + "apiDefinitionName": { + "name": "apiDefinitionName", + "in": "path", + "description": "The name of the API Definition resource.", + "required": true, + "style": "simple", + "explode": false, + "example": "shopping-app-api-def", + "schema": { + 
"type": "string" + } + }, + "publishedApiName": { + "name": "publishedApiName", + "in": "path", + "description": "The name of the Published API resource.", + "required": true, + "style": "simple", + "explode": false, + "example": "shopping-app-api-staging", + "schema": { + "type": "string" + } + }, + "versionName": { + "name": "version", + "in": "path", + "description": "The Version of the API Definition.", + "required": true, + "style": "simple", + "explode": false, + "example": "v1_2fd4e1c6", + "schema": { + "type": "string" + } + }, + "InstanceGroupName": { + "name": "instanceGroupName", + "in": "path", + "description": "The name of the Instance Group.", + "required": true, + "style": "simple", + "explode": false, + "schema": { + "type": "string" + } + }, + "identityProviderName": { + "name": "identityProviderName", + "in": "path", + "description": "The name of the Identity Provider.", + "required": true, + "style": "simple", + "explode": false, + "schema": { + "type": "string" + } + }, + "identityProviderClientName": { + "name": "identityProviderClientName", + "in": "path", + "description": "The name of the Identity Provider Client.", + "required": true, + "style": "simple", + "explode": false, + "schema": { + "type": "string" + } + }, + "InstanceTemplateName": { + "name": "instanceTemplateName", + "in": "path", + "description": "The name of the Instance Template resource.", + "required": true, + "style": "simple", + "explode": false, + "schema": { + "type": "string" + } + }, + "IntegrationName": { + "name": "integrationName", + "in": "path", + "description": "The name of the Integration resource.", + "required": true, + "schema": { + "type": "string" + } + } + }, + "schemas": { + "SelfLinks": { + "type": "object", + "description": "The SelfLinks object contains a link from the resource to itself.\nThis object is used only in responses.\n", + "properties": { + "rel": { + "type": "string", + "example": "/api/v1/services/environments/prod", + "description": 
"`rel` contains the complete path fragment of a URI and can be used\nto construct a query to the object.\n" + } + } + }, + "ResourceMeta": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "pattern": "^[^A-Z\\s\\x00-\\x1f\\x60\\x7f\\;\\*\\\"\\[\\]\\{\\}\\\\\\/%\\?:=&\\~\\^|#<>]+$", + "minLength": 1, + "maxLength": 1024, + "example": "resource-name", + "description": "Resource name is a unique identifier for a resource within the context of a namespace.\nResource names must conform to [RFC 1738 Section 2.2](https://www.ietf.org/rfc/rfc1738.txt) and have a valid syntax for email addresses. The following rules are enforced:\n\n- do not utilize URL encoding;\n- do not include spaces;\n- do not use uppercase characters, for example, 'A-Z'; extended character sets are supported;\n- do not use the following characters: `\"`, `*`, `:`, `;`, `/`, `\\`, `%`, `?`, `hash`, `=`, `&`, `|`, `~`, `^`, `{`, `}`, `[`, `]`, `<`, `>`;\n- cannot start or end with an `@` sign;\n\nFor example: For a collection resource located at\n\n`https://controller.example.com/api/v1/services/apps/shopping_@1`\n\nthe resource name is \"shopping_@1\".\n" + }, + "displayName": { + "type": "string", + "example": "My Display Name", + "description": "`displayName` is a user friendly resource name. It can be used to define \na longer, and less constrained, name for a resource.\n\nDisplay names:\n\n- are optional (defaults to an empty string if no value is provided),\n- do not have to be unique, \n- cannot be assigned by the server.\n" + }, + "description": { + "type": "string", + "example": "This is a sample description string. It provides information about the resource.", + "description": "`description` is a free-form text property. 
You can use it to provide information that helps \nto identify the resource.\n\nDescriptions:\n\n- are optional (defaults to an empty string if no value is provided),\n- do not have to be unique, \n- cannot be assigned by the server.\n" + }, + "kind": { + "type": "string", + "example": "-", + "description": "Kind is a string representation of an API resource's data type.\nIt is assigned by the server and cannot be changed. \n\nWhen creating a `kind`, the server uses hyphens to connect word segments; \nsingleton and collection item resources are not pluralized.\n" + }, + "uid": { + "type": "string", + "format": "uuid", + "example": "d290f1ee-6c54-4b01-90e6-d701748f0851", + "description": "Unique Identifier (UID)\n\nUID is a unique identifier in time and space for a resource. \nWhen you create a resource, the server assigns a UID to the resource.\n\nRefer to [IETF RFC 4122](https://tools.ietf.org/html/rfc4122) for more information.\n" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "production_public", + "dev", + "new_app", + "us-west-1", + "emea" + ], + "description": "You can assign `tags` to a resource as a way to help map, scope, \nand organize resources. 
\n\nThe system uses tag selectors to specify selection criteria that \nmatch resources that have particular tags.\n" + }, + "ref": { + "type": "string", + "example": "/services/environments/prod", + "description": "The `ref` field contains a reference to another NGINX Controller resource.\n" + }, + "links": { + "$ref": "#/components/schemas/SelfLinks" + }, + "createTime": { + "type": "string", + "format": "date-time", + "example": "2019-07-29T09:12:33.001Z", + "description": "A timestamp that represents the server time when the resource was created.\n\nCreate time is not guaranteed to be set in \"happens-before\" order\nacross separate operations.\n\nIn JSON format, `create_time` type is encoded as a string in the\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt).\n\nFor example: 2018-04-01T01:30:15.01Z\n\nCreate Time is assigned by the server and cannot be changed.\n" + }, + "updateTime": { + "type": "string", + "format": "date-time", + "example": "2019-07-29T10:12:33.001Z", + "description": "A timestamp that represents the server time when the resource was last modified.\n\nResources that have never been updated do not have an `update_time` stamp.\n\nThe default value for resources that have never been updated is the local \nlanguage-specific equivalent of \"null\".\n\nIn JSON format, `update_time` type is encoded as a string as described in \n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt).\n" + } + } + }, + "ResourceName": { + "type": "string", + "description": "The name of a resource.", + "example": "production" + }, + "ConfigStateTally": { + "type": "object", + "properties": { + "isConfigured": { + "type": "boolean", + "description": "The configuration operation is complete." + }, + "isConfiguring": { + "type": "boolean", + "description": "The configuration of the resource, or of its child(ren), is in process." + }, + "isError": { + "type": "boolean", + "description": "An error occurred while configuring the resource or its child(ren)." 
+ }, + "isDeleting": { + "type": "boolean", + "description": "A delete operation is in progress for the resource or its child(ren)." + }, + "total": { + "type": "integer", + "description": "The total number of resources to which the configuration operation applies." + }, + "configured": { + "type": "integer", + "description": "The number of resources that have a complete and valid configuration." + }, + "configuring": { + "type": "integer", + "description": "The number of resources that are in the process of being configured." + }, + "error": { + "type": "integer", + "description": "The number of resources that have encountered an error during the configuration process." + }, + "deleting": { + "type": "integer", + "description": "The number of resources that are in the process of being deleted." + } + } + }, + "ConfigCondition": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "The condition type." + }, + "message": { + "type": "string", + "description": "A human-readable message that provides additional information about the configuration operation." 
+ } + } + }, + "ConfigState": { + "type": "object", + "description": "A representation of the resource's current configuration state \nthat comprises the status of the resource itself (`selfConfigState`) and any child \nresources (`childrenConfigState`).\n\nThe conditions array provides additional information during configuration changes.\n", + "properties": { + "selfConfigState": { + "$ref": "#/components/schemas/ConfigStateTally" + }, + "childrenConfigState": { + "$ref": "#/components/schemas/ConfigStateTally" + }, + "conditions": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ConfigCondition" + } + } + } + }, + "NamedLinks": { + "allOf": [ + { + "$ref": "#/components/schemas/SelfLinks" + }, + { + "type": "object", + "description": "Contains information about the object being referred to.\n\nThese are generally details -- like the object name and display name --\nthat are useful to a consumer of the API that performs further\nprocessing. \n\nThis object is only present in responses.\n \n", + "properties": { + "name": { + "type": "string", + "example": "production", + "description": "The name of the linked resource.\n" + }, + "displayName": { + "type": "string", + "example": "Production Environment", + "description": "A user friendly resource name." + } + } + } + ] + }, + "ResourceRef": { + "type": "object", + "required": [ + "ref" + ], + "properties": { + "ref": { + "type": "string", + "example": "/services/environments/prod", + "description": "A reference to another NGINX Controller resource.\n" + }, + "links": { + "$ref": "#/components/schemas/NamedLinks" + } + } + }, + "ErrorDetail": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string", + "example": "Error doing : . This can lead to . Try to resolve the issue.", + "description": "A detailed error message returned by the server. 
\n\nThese messages contain the following information, where applicable:\n\n- What happened.\n- Why it happened.\n- What the consequences are (if any).\n- Recommended action to take to resolve the issue.\n" + } + } + }, + "ErrorModel": { + "type": "object", + "required": [ + "message", + "code" + ], + "properties": { + "message": { + "type": "string", + "example": "Error doing .", + "description": "A human-readable message, in English, that describes the error.\n" + }, + "code": { + "type": "integer", + "example": 1234567, + "description": "A numeric error code that can be used to identify errors for support purposes.\n" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ErrorDetail" + } + } + } + }, + "EnvironmentName": { + "type": "string" + }, + "AppName": { + "type": "string" + }, + "Instance": { + "type": "object", + "description": "An NGINX Instance.", + "required": [ + "metadata" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "currentStatus": { + "$ref": "#/components/schemas/InstanceCurrentStatus" + }, + "desiredState": { + "$ref": "#/components/schemas/InstanceDesiredState" + } + } + }, + "GetInstanceResponse": { + "allOf": [ + { + "$ref": "#/components/schemas/Instance" + } + ] + }, + "ListInstanceResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Instance" + } + } + } + }, + "InstanceRequest": { + "allOf": [ + { + "$ref": "#/components/schemas/Instance" + } + ], + "description": "Describes the Instance to update." 
+ }, + "InstanceDesiredState": { + "oneOf": [ + { + "$ref": "#/components/schemas/OtherInstanceDesiredState" + }, + { + "$ref": "#/components/schemas/AWSInstanceDesiredState" + }, + { + "$ref": "#/components/schemas/AzureInstanceDesiredState" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "OTHER_INSTANCE": "#/components/schemas/OtherInstanceDesiredState", + "AWS_INSTANCE": "#/components/schemas/AWSInstanceDesiredState", + "AZURE_INSTANCE": "#/components/schemas/AzureInstanceDesiredState" + } + } + }, + "InstanceCurrentStatus": { + "oneOf": [ + { + "$ref": "#/components/schemas/OtherInstanceCurrentStatus" + }, + { + "$ref": "#/components/schemas/AWSInstanceCurrentStatus" + }, + { + "$ref": "#/components/schemas/AzureInstanceCurrentStatus" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "OTHER_INSTANCE": "#/components/schemas/OtherInstanceCurrentStatus", + "AWS_INSTANCE": "#/components/schemas/AWSInstanceCurrentStatus", + "AZURE_INSTANCE": "#/components/schemas/AzureInstanceCurrentStatus" + } + } + }, + "OtherInstanceDesiredState": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "description": "OTHER_INSTANCE is an Instance pre-installed and self-registered during NGINX installation.\n", + "enum": [ + "OTHER_INSTANCE" + ] + }, + "nginx": { + "$ref": "#/components/schemas/Nginx" + } + } + }, + "AWSInstanceDesiredState": { + "type": "object", + "required": [ + "type", + "templateRef" + ], + "properties": { + "type": { + "type": "string", + "description": "AWS_INSTANCE is an Instance hosted in Amazon Web Services (AWS).\n", + "enum": [ + "AWS_INSTANCE" + ] + }, + "templateRef": { + "$ref": "#/components/schemas/TemplateRef" + }, + "nginx": { + "$ref": "#/components/schemas/Nginx" + } + } + }, + "AzureInstanceDesiredState": { + "x-f5-experimental": true, + "type": "object", + "required": [ + "type", + "templateRef" + ], + "properties": { + "type": { + "type": 
"string", + "description": "AZURE_INSTANCE is an Instance hosted in Microsoft Azure.", + "enum": [ + "AZURE_INSTANCE" + ] + }, + "templateRef": { + "$ref": "#/components/schemas/TemplateRef" + }, + "nginx": { + "$ref": "#/components/schemas/Nginx" + } + } + }, + "OtherInstanceCurrentStatus": { + "type": "object", + "description": "Contains the current status of the Other Instance.", + "required": [ + "type", + "hostname", + "version", + "agent", + "status", + "muted" + ], + "properties": { + "hostname": { + "type": "string", + "description": "The hostname of the Instance." + }, + "version": { + "type": "string", + "description": "The version of NGINX running on the Instance." + }, + "muted": { + "type": "boolean", + "description": "Indicates the status of notifications and alerts.\n- 'True' means that notifications and alerts are muted.\n- 'False' means that notifications and alerts are active.\n", + "deprecated": true + }, + "type": { + "type": "string", + "description": "OTHER_INSTANCE is an Instance pre-installed and self-registered during NGINX installation.\n", + "enum": [ + "OTHER_INSTANCE" + ] + }, + "networkConfig": { + "$ref": "#/components/schemas/OtherNetworkConfig" + }, + "agent": { + "$ref": "#/components/schemas/Agent" + }, + "nginx": { + "$ref": "#/components/schemas/Nginx" + }, + "status": { + "deprecated": true, + "allOf": [ + { + "$ref": "#/components/schemas/ConfigState" + } + ] + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "legacyNginxMetadata": { + "deprecated": true, + "$ref": "#/components/schemas/LegacyNginxMetadata" + }, + "legacySystemMetadata": { + "deprecated": true, + "$ref": "#/components/schemas/LegacySystemMetadata" + } + } + }, + "AWSInstanceCurrentStatus": { + "type": "object", + "description": "Contains the current status of the AWS Instance.", + "required": [ + "type", + "hostname", + "version", + "agent", + "muted" + ], + "properties": { + "hostname": { + "type": "string", + "description": "The hostname 
of the Instance." + }, + "instanceID": { + "type": "string", + "description": "The ID of the Instance." + }, + "version": { + "type": "string", + "description": "The version of NGINX running on the Instance." + }, + "muted": { + "type": "boolean", + "description": "Indicates the status of notifications and alerts.\n- 'True' means that notifications and alerts are muted.\n- 'False' means that notifications and alerts are active.\n", + "deprecated": true + }, + "type": { + "type": "string", + "description": "AWS_INSTANCE is an Instance hosted in Amazon Web Services (AWS).\n", + "enum": [ + "AWS_INSTANCE" + ] + }, + "networkConfig": { + "$ref": "#/components/schemas/AWSNetworkConfig" + }, + "agent": { + "$ref": "#/components/schemas/Agent" + }, + "templateRef": { + "$ref": "#/components/schemas/TemplateRef" + }, + "nginx": { + "$ref": "#/components/schemas/Nginx" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "legacyNginxMetadata": { + "deprecated": true, + "$ref": "#/components/schemas/LegacyNginxMetadata" + }, + "legacySystemMetadata": { + "deprecated": true, + "$ref": "#/components/schemas/LegacySystemMetadata" + } + } + }, + "AzureInstanceCurrentStatus": { + "x-f5-experimental": true, + "type": "object", + "description": "Contains the current status of the Azure Instance.", + "required": [ + "type", + "hostname", + "version", + "agent" + ], + "properties": { + "hostname": { + "type": "string", + "description": "The hostname of the Instance." + }, + "instanceID": { + "type": "string", + "description": "The ID of the Instance." + }, + "version": { + "type": "string", + "description": "The version of NGINX running on the Instance." 
+ }, + "muted": { + "type": "boolean", + "description": "Indicates the mute status of notifications and alerts.\n- 'True' means that notifications and alerts are muted.\n- 'False' means that notifications and alerts are active.\n", + "deprecated": true + }, + "type": { + "type": "string", + "description": "AZURE_INSTANCE is an Instance hosted in Microsoft Azure.", + "enum": [ + "AZURE_INSTANCE" + ] + }, + "networkConfig": { + "$ref": "#/components/schemas/AzureNetworkConfig" + }, + "agent": { + "$ref": "#/components/schemas/Agent" + }, + "templateRef": { + "$ref": "#/components/schemas/TemplateRef" + }, + "nginx": { + "$ref": "#/components/schemas/Nginx" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "legacyNginxMetadata": { + "deprecated": true, + "$ref": "#/components/schemas/LegacyNginxMetadata" + }, + "legacySystemMetadata": { + "deprecated": true, + "$ref": "#/components/schemas/LegacySystemMetadata" + } + } + }, + "TemplateRef": { + "type": "object", + "allOf": [ + { + "$ref": "#/components/schemas/ResourceRef" + } + ], + "description": "Reference to an existing Instance Template resource. This field cannot be updated." + }, + "LegacySystemMetadata": { + "type": "object", + "deprecated": true, + "description": "Metadata that describe the operating system attributes and properties of an Instance host system. It is\nintended for internal use only and is subject to change.\n", + "additionalProperties": true + }, + "LegacyNginxMetadata": { + "type": "object", + "deprecated": true, + "description": "Metadata that describe an Instance's NGINX process configuration and properties. 
It is intended\nfor internal use only and is subject to change.\n", + "additionalProperties": true + }, + "Agent": { + "type": "object", + "description": "The properties of the Controller Agent running on the Instance", + "required": [ + "version" + ], + "properties": { + "version": { + "type": "string", + "description": "The version of Controller Agent that is currently running on the Instance." + }, + "online": { + "type": "boolean", + "description": "The status of Controller Agent that is currently running on the Instance." + }, + "credentials": { + "$ref": "#/components/schemas/AgentCredentials" + } + } + }, + "AgentCredentials": { + "type": "object", + "description": "The credentials of the Controller Agent running on the Instance.", + "properties": { + "hostname": { + "type": "string", + "description": "The hostname of the Agent." + }, + "uuid": { + "type": "string", + "description": "The uuid of the Agent." + } + } + }, + "OtherNetworkConfig": { + "type": "object", + "description": "The network config of a customer deployed Instance.", + "properties": { + "networkInterfaces": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OtherNetworkInterface" + } + } + } + }, + "OtherNetworkInterface": { + "type": "object", + "description": "A network interface for a customer deployed Instance.", + "properties": { + "name": { + "type": "string", + "description": "The name of the network interface attached to the Instance." + }, + "privateDnsName": { + "type": "string", + "description": "The private, internal hostname of the instance, which resolves to the Instance's private IP address.\n" + }, + "privateIP": { + "type": "string", + "description": "The private IP address of the network interface." + }, + "privateIPv6": { + "type": "string", + "description": "The private IPv6 address of the network interface." 
+ }, + "alternateIPList": { + "type": "array", + "items": { + "type": "string" + } + }, + "alternateIPv6List": { + "type": "array", + "items": { + "type": "string" + } + }, + "subnet": { + "type": "object", + "description": "The subnet that contains the interface.", + "properties": { + "cidrIPv4": { + "type": "string", + "description": "The IPv4 CIDR for the subnet." + }, + "cidrIPv6": { + "type": "string", + "description": "The IPv6 CIDR for the subnet." + } + } + } + } + }, + "AWSNetworkConfig": { + "type": "object", + "description": "The network config of an AWS Instance.", + "properties": { + "networkInterfaces": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AWSNetworkInterface" + } + } + } + }, + "AWSNetworkInterface": { + "type": "object", + "description": "A network interface for an AWS Instance.", + "properties": { + "name": { + "type": "string", + "description": "The name of the network interface attached to the Instance." + }, + "privateDnsName": { + "type": "string", + "description": "The private, internal hostname of the instance, which resolves to the Instance's private IP address. \n" + }, + "publicDnsName": { + "type": "string", + "description": "The public hostname of the instance, which resolves to the public IP address of the Instance. \n" + }, + "privateIP": { + "type": "string", + "description": "The private IP address of the network interface." + }, + "publicIP": { + "type": "string", + "description": "The public IP address of the network interface." + }, + "subnet": { + "type": "object", + "description": "The subnet that contains the interface.", + "properties": { + "subnetID": { + "type": "string", + "description": "The ID of the subnet into which the instance was launched." + }, + "cidrIPv4": { + "type": "string", + "description": "The IPv4 CIDR for the subnet." + }, + "cidrIPv6": { + "type": "string", + "description": "The IPv6 CIDR for the subnet." 
+ } + } + } + } + }, + "AzureNetworkConfig": { + "type": "object", + "description": "The network config of an Azure Instance.", + "properties": { + "networkInterfaces": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AzureNetworkInterface" + } + } + } + }, + "AzureNetworkInterface": { + "type": "object", + "description": "A network interface associated with an Azure Instance.", + "properties": { + "name": { + "type": "string", + "description": "The name of the network interface attached to the Instance." + }, + "publicDnsName": { + "type": "string", + "description": "The public hostname of the instance, which resolves to the public IP address of the Instance. \n" + }, + "privateIP": { + "type": "string", + "description": "The private IP address of the network interface." + }, + "publicIP": { + "type": "string", + "description": "The public IP address of the network interface." + }, + "privateIPv6": { + "type": "string", + "description": "The private IPv6 address of the network interface." + }, + "publicIPv6": { + "type": "string", + "description": "The public IPv6 address of the network interface." + }, + "subnet": { + "type": "object", + "description": "The subnet that contains the interface.", + "properties": { + "subnetID": { + "type": "string", + "description": "The ID of the subnet that contains the Instance." + }, + "cidrIPv4": { + "type": "string", + "description": "The IPv4 CIDR for the subnet." + }, + "cidrIPv6": { + "type": "string", + "description": "The IPv6 CIDR for the subnet." 
+ } + } + } + } + }, + "Nginx": { + "type": "object", + "description": "Defines properties and configuration values for Nginx.\n", + "properties": { + "process": { + "$ref": "#/components/schemas/NginxProcess" + } + } + }, + "NginxProcess": { + "type": "object", + "description": "Defines configuration directives that are defined in the main configuration context.\n", + "properties": { + "user": { + "type": "string", + "description": "Defines user credentials used by worker processes.\n", + "default": "nginx" + }, + "group": { + "type": "string", + "description": "Defines group credentials used by worker processes. This will be ignored if the user\nproperty is not defined.\n", + "default": "nginx" + } + } + }, + "APIDefinition": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "currentStatus": { + "$ref": "#/components/schemas/APIDefinitionCurrentStatus" + } + } + }, + "APIDefinitionCurrentStatus": { + "type": "object", + "properties": { + "apiDefinitionVersionRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + } + }, + "APIDefinitionList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/APIDefinition" + } + } + } + }, + "APIDefinitionSpecMapping": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/components/schemas/RESTAPISpec" + }, + { + "$ref": "#/components/schemas/gRPCProxySpec" + }, + { + "$ref": "#/components/schemas/SOAPAPISpec" + } + ] + } + }, + "RESTAPISpec": { + "type": "object", + "description": "Validates an Imported OpenAPI 3 spec formatted as JSON using the [OAS v3 schema.yaml](https://github.com/OAI/OpenAPI-Specification/blob/master/schemas/v3.0/schema.yaml) specification.\n" + }, + "SOAPAPISpec": { + "type": "object", + "description": "Validates and Transforms an Imported WSDL spec.\n", + "properties": { + "wsdl": { + "type": "string" + }, + 
"soap-config": { + "$ref": "#/components/schemas/SoapConfig" + } + }, + "required": [ + "soap-config" + ] + }, + "SoapConfig": { + "type": "object", + "description": "Transformed WSDL to OpenAPI spec config.\n", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "tns": { + "type": "string" + }, + "qualifiedFields": { + "type": "boolean" + }, + "endpoints": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/SoapConfigEndpoint" + } + }, + "types": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/SoapConfigWSDLType" + } + }, + "messages": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/SoapConfigMessage" + } + } + }, + "required": [ + "name", + "url", + "tns", + "endpoints", + "types", + "messages", + "qualifiedFields" + ] + }, + "SoapConfigEndpoint": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "in": { + "type": "string" + }, + "out": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "restHTTPMethod": { + "type": "string", + "enum": [ + "get", + "head", + "post", + "put", + "patch", + "delete" + ] + }, + "restURI": { + "type": "string" + } + }, + "required": [ + "name", + "in", + "out", + "enabled", + "restHTTPMethod", + "restURI" + ] + }, + "SoapConfigMessage": { + "type": "object", + "description": "Validates and Transforms an Imported WSDL spec.\n", + "properties": { + "name": { + "type": "string" + }, + "parts": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/SoapConfigMessagePart" + } + } + }, + "required": [ + "name", + "parts" + ] + }, + "SoapConfigMessagePart": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "restName": { + "type": "string" + }, + "type": { + "type": "string" + }, + "ref": { + "type": "string" + } + }, + "required": [ + "name", + "restName" + ] + }, + "SoapConfigWSDLType": { + "type": "object", 
+ "properties": { + "name": { + "type": "string" + }, + "type": { + "type": "string" + }, + "properties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/SoapConfigNestedWSDLType" + } + }, + "hasOneOfProperties": { + "type": "boolean" + }, + "hasOrderedProperties": { + "type": "boolean" + }, + "enum": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "name", + "type", + "hasOneOfProperties", + "hasOrderedProperties" + ] + }, + "SoapConfigNestedWSDLType": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "restName": { + "type": "string" + }, + "type": { + "type": "string" + }, + "ref": { + "type": "string" + }, + "isRequired": { + "type": "boolean" + }, + "isArray": { + "type": "boolean" + } + }, + "required": [ + "name", + "restName", + "isRequired", + "isArray" + ] + }, + "gRPCProxySpec": { + "x-f5-experimental": true, + "type": "object", + "description": "Validates an Imported gRPC spec.\n" + }, + "APIDefinitionVersion": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/components/schemas/APIDefinitionVersionMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/APIDefinitionVersionDesiredState" + }, + "currentStatus": { + "$ref": "#/components/schemas/APIDefinitionVersionCurrentStatus" + } + } + }, + "APIDefinitionVersionList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/APIDefinitionVersion" + } + } + } + }, + "APIDefinitionVersionDesiredState": { + "type": "object", + "properties": { + "specs": { + "$ref": "#/components/schemas/APIDefinitionSpecMapping" + } + } + }, + "APIDefinitionVersionCurrentStatus": { + "type": "object", + "properties": { + "specs": { + "$ref": "#/components/schemas/APIDefinitionSpecMapping" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "publishedApiRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } 
+ } + } + }, + "apiDefinitionVersionRef": { + "type": "object", + "properties": { + "ref": { + "type": "string", + "description": "Reference to the Version of the API Definition.\n", + "example": "/services/api-definitions/baseball-stats/versions/v1" + }, + "links": { + "$ref": "#/components/schemas/NamedLinks" + } + } + }, + "APIDefinitionVersionMeta": { + "allOf": [ + { + "$ref": "#/components/schemas/ResourceMeta" + }, + { + "type": "object", + "properties": { + "isDefaultVersion": { + "type": "boolean" + } + } + } + ] + }, + "EnvironmentList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Environment" + } + } + } + }, + "Environment": { + "type": "object", + "description": "An Environment is a logical container that you can use to organize your Apps. A few commonly-used examples of Environments are \"dev\" and \"production\".", + "required": [ + "metadata" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "x-f5-experimental": true, + "type": "object" + }, + "currentStatus": { + "$ref": "#/components/schemas/EnvironmentCurrentStatus" + } + } + }, + "EnvironmentCurrentStatus": { + "type": "object", + "properties": { + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "gatewayRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "appRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "certRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + } + }, + "DevPortalsList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DevPortal" + } + } + } + }, + "DevPortal": { + "type": "object", + "description": "A Dev Portal.", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": 
"#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/DevPortalDesiredState" + }, + "currentStatus": { + "$ref": "#/components/schemas/DevPortalCurrentStatus" + } + }, + "additionalProperties": false + }, + "DevPortalDesiredState": { + "type": "object", + "required": [ + "ingress" + ], + "properties": { + "ingress": { + "type": "object", + "required": [ + "gatewayRefs" + ], + "properties": { + "gatewayRefs": { + "type": "array", + "description": "Reference to the Gateways that act as a Developer Portal.", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + }, + "additionalProperties": false + }, + "devPortalTheme": { + "$ref": "#/components/schemas/DevPortalTheme" + }, + "publishedApiRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "devPortalType": { + "type": "string", + "enum": [ + "private", + "public", + "partner" + ] + } + }, + "additionalProperties": false + }, + "DevPortalCurrentStatus": { + "type": "object", + "properties": { + "ingress": { + "type": "object", + "required": [ + "gatewayRefs" + ], + "properties": { + "gatewayRefs": { + "type": "array", + "description": "Reference to the Gateways that act as a Developer Portal.", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + } + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "devPortalTheme": { + "$ref": "#/components/schemas/DevPortalTheme" + }, + "publishedApiRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "publishedTime": { + "type": "string", + "format": "date-time", + "example": "2019-07-29T10:12:33.001Z", + "description": "Published time is a timestamp that represents the server time when the resource was published.\nResources that have never been published do not have a `published_time` stamp.\nThe default value is language-specific and, in general, should be equivalent of the null construct.\nIn JSON format, 
`published_time` type is encoded as a string as described in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt).\n\nFor example: 2018-04-01T01:30:15.01Z\n" + } + } + }, + "DevPortalTheme": { + "description": "Specifies the theming for the Developer Portal.", + "type": "object", + "properties": { + "overrideDefaultTheme": { + "type": "boolean", + "example": false, + "description": "Override the default Dev Portal theme with a custom theme." + }, + "customConfig": { + "type": "object", + "properties": { + "primary": { + "$ref": "#/components/schemas/ThemeConfig" + }, + "secondary": { + "$ref": "#/components/schemas/ThemeConfig" + }, + "fonts": { + "type": "object", + "properties": { + "assignments": { + "$ref": "#/components/schemas/FontAssignments" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + "brandName": { + "$ref": "#/components/schemas/BrandName" + }, + "logo": { + "$ref": "#/components/schemas/FileEncodedString" + }, + "defaultLogo": { + "$ref": "#/components/schemas/FileEncodedString" + }, + "favicon": { + "$ref": "#/components/schemas/FileEncodedString" + } + }, + "additionalProperties": false + }, + "ThemeConfig": { + "type": "object", + "properties": { + "color": { + "$ref": "#/components/schemas/ThemeConfigColors" + } + }, + "additionalProperties": false + }, + "ThemeConfigColors": { + "type": "object", + "properties": { + "primary": { + "type": "string", + "example": "#575fe6", + "description": "A CSS color string used as a primary brand theme color." + }, + "accent": { + "type": "string", + "example": "#48dbac", + "description": "A CSS color string used as an optional second brand theme color." + }, + "gray": { + "type": "string", + "example": "#1e1f27", + "description": "A CSS color string used to generate a grayscale color palette." + }, + "link": { + "type": "string", + "example": "#0f55bd", + "description": "A CSS color string used to generate anchor link colors." 
+ }, + "fill": { + "type": "string", + "example": "#fafbfc", + "description": "A CSS color string used as the main background color." + }, + "ink": { + "type": "string", + "example": "#323441", + "description": "A CSS color string used as the main text color." + }, + "status": { + "$ref": "#/components/schemas/ThemeConfigStatusColors" + } + }, + "additionalProperties": false + }, + "ThemeConfigStatusColors": { + "type": "object", + "description": "A collection of CSS color strings used to indicate status.", + "properties": { + "info": { + "type": "string", + "example": "#20a9ea", + "description": "A CSS color string used to indicate an informational status." + }, + "success": { + "type": "string", + "example": "#37c497", + "description": "A CSS color string used to indicate a success status." + }, + "error": { + "type": "string", + "example": "#ed4f54", + "description": "A CSS color string used to indicate an error status." + }, + "warning": { + "type": "string", + "example": "#ffb900", + "description": "A CSS color string used to indicate a warning status." + } + }, + "additionalProperties": false + }, + "ThemeConfigFonts": { + "type": "object", + "description": "A collection of fonts for theming typography.", + "properties": { + "headings": { + "allOf": [ + { + "$ref": "#/components/schemas/ThemeConfigFont" + } + ], + "description": "A font used for headlines." + }, + "body": { + "allOf": [ + { + "$ref": "#/components/schemas/ThemeConfigFont" + } + ], + "description": "A font used for body copy." + }, + "cta": { + "allOf": [ + { + "$ref": "#/components/schemas/ThemeConfigFont" + } + ], + "description": "A font used for calls to action." + }, + "code": { + "allOf": [ + { + "$ref": "#/components/schemas/ThemeConfigFont" + } + ], + "description": "A font used for code and syntax highlighting." + }, + "special": { + "allOf": [ + { + "$ref": "#/components/schemas/ThemeConfigFont" + } + ], + "description": "A font used for special accent typography." 
+ }, + "baseFontSize": { + "$ref": "#/components/schemas/BaseFontSize" + }, + "embeddedLink": { + "$ref": "#/components/schemas/EmbeddedLink" + } + }, + "additionalProperties": false + }, + "ThemeConfigFont": { + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "google-web-font" + ] + }, + "value": { + "type": "string" + } + }, + "additionalProperties": false + }, + "FileEncodedString": { + "type": "string", + "example": "c29tZXRoaW5nIA==", + "description": "Base64 encoded string of a logo." + }, + "BrandName": { + "type": "string", + "example": "Acme", + "description": "Name of the brand." + }, + "BaseFontSize": { + "type": "integer", + "example": 14 + }, + "EmbeddedLink": { + "type": "string" + }, + "FontAssignments": { + "type": "object", + "$ref": "#/components/schemas/ThemeConfigFonts" + }, + "ListInstanceGroupsResponse": { + "x-f5-experimental": true, + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/InstanceGroup" + } + } + } + }, + "GetInstanceGroupResponse": { + "x-f5-experimental": true, + "allOf": [ + { + "$ref": "#/components/schemas/InstanceGroup" + } + ] + }, + "InstanceGroup": { + "x-f5-experimental": true, + "type": "object", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/InstanceGroupState" + }, + "currentStatus": { + "$ref": "#/components/schemas/InstanceGroupStatus" + } + } + }, + "InstanceGroupState": { + "x-f5-experimental": true, + "type": "object", + "properties": { + "bigIpIntegration": { + "$ref": "#/components/schemas/BigIpIntegration" + } + } + }, + "InstanceGroupStatus": { + "x-f5-experimental": true, + "type": "object", + "required": [ + "state" + ], + "properties": { + "instanceRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/InstanceRef" + } + }, + "bigIpIntegration": { + 
"$ref": "#/components/schemas/BigIpIntegration" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + } + } + }, + "InstanceRef": { + "x-f5-experimental": true, + "type": "object", + "allOf": [ + { + "$ref": "#/components/schemas/ResourceRef" + } + ], + "description": "Reference to a member Instance resource." + }, + "BigIpIntegration": { + "x-f5-experimental": true, + "type": "object", + "properties": { + "integrationRef": { + "description": "Reference to a BIG-IP Integration object, indicating that the Instances will be members of a BIG-IP server pool.", + "$ref": "#/components/schemas/ResourceRef" + }, + "serverPoolIp": { + "description": "The Instance IP address or CIDR to use when the Instance is a member of a BIG-IP server pool.\nIf this is a CIDR, then the Instance IP address that matches the mask will be the member address in the BIG-IP server pool.\nOtherwise, if this is an absolute IP address, that will be used as the server-pool member address.\n", + "type": "string" + } + } + }, + "IdentityProviderList": { + "type": "object", + "description": "Contains a list of Identity Provider resources.", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IdentityProvider" + } + } + } + }, + "APIKeyIdentityProvider": { + "required": [ + "type" + ], + "type": "object", + "description": "Use an API key for authentication.\n\n> **Note:** Use of API Key authN is not recommended in production environments.\n", + "properties": { + "type": { + "type": "string", + "enum": [ + "API_KEY" + ] + } + } + }, + "JWTIdentityProvider": { + "required": [ + "jwkFile", + "type" + ], + "type": "object", + "description": "Use a JWT for authentication.", + "properties": { + "type": { + "type": "string", + "enum": [ + "JWT" + ] + }, + "jwkFile": { + "description": "Provide the path to - or URI for - a `.jwk` file to use for authentication.\nYou can also provide the `.jwk` file contents inline.\n", + "discriminator": { + "propertyName": 
"type", + "mapping": { + "INLINE": "#/components/schemas/JWKInline", + "REMOTE_FILE": "#/components/schemas/JWKRemoteFile" + } + }, + "oneOf": [ + { + "$ref": "#/components/schemas/JWKInline" + }, + { + "$ref": "#/components/schemas/JWKRemoteFile" + } + ] + } + } + }, + "IdentityProviderDesiredState": { + "required": [ + "environmentRefs", + "identityProvider" + ], + "type": "object", + "properties": { + "environmentRefs": { + "description": "The Enviroment associated with the Identity Provider.", + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "identityProvider": { + "$ref": "#/components/schemas/IdentityProviderData" + } + }, + "example": { + "environmentRefs": [ + { + "ref": "/services/environments/env1" + } + ], + "identityProvider": { + "type": "JWT", + "jwkFile": { + "type": "REMOTE_FILE", + "uri": "https://example.com/keys.jwk", + "cacheExpire": "10h" + } + } + } + }, + "IdentityProviderCurrentStatus": { + "required": [ + "environmentRefs", + "identityProvider", + "state" + ], + "type": "object", + "properties": { + "environmentRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "identityProvider": { + "$ref": "#/components/schemas/IdentityProviderData" + } + } + }, + "IdentityProvider": { + "required": [ + "desiredState", + "metadata" + ], + "type": "object", + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "currentStatus": { + "$ref": "#/components/schemas/IdentityProviderCurrentStatus" + }, + "desiredState": { + "$ref": "#/components/schemas/IdentityProviderDesiredState" + } + } + }, + "IdentityProviderData": { + "description": "The means of authentication used by the Identity Provider (JWT or APIKey).", + "oneOf": [ + { + "$ref": "#/components/schemas/JWTIdentityProvider" + }, + { + "$ref": "#/components/schemas/APIKeyIdentityProvider" + } + ], + "discriminator": { + 
"propertyName": "type", + "mapping": { + "API_KEY": "#/components/schemas/APIKeyIdentityProvider", + "JWT": "#/components/schemas/JWTIdentityProvider" + } + } + }, + "IdentityProviderClientDesiredState": { + "type": "object", + "properties": { + "credential": { + "$ref": "#/components/schemas/IdentityProviderClientCredential" + } + } + }, + "IdentityProviderClientCurrentStatus": { + "type": "object", + "properties": { + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "credential": { + "$ref": "#/components/schemas/IdentityProviderClientCredential" + } + } + }, + "IdentityProviderClient": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "currentStatus": { + "$ref": "#/components/schemas/IdentityProviderClientCurrentStatus" + }, + "desiredState": { + "$ref": "#/components/schemas/IdentityProviderClientDesiredState" + } + } + }, + "IdentityProviderClientCredential": { + "oneOf": [ + { + "$ref": "#/components/schemas/IdentityProviderAPIKey" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "API_KEY": "#/components/schemas/IdentityProviderAPIKey" + } + } + }, + "IdentityProviderClientList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IdentityProviderClient" + } + } + } + }, + "UpdateIdentityProviderClient": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "currentStatus": { + "$ref": "#/components/schemas/IdentityProviderClientCurrentStatus" + }, + "desiredState": { + "$ref": "#/components/schemas/IdentityProviderClientDesiredState" + } + } + }, + "JWK": { + "type": "object", + "properties": { + "kty": { + "type": "string", + "description": "The cryptographic algorithm family used with the key, such as \"RSA\" or \"EC\"." 
+ }, + "use": { + "type": "string", + "description": "The intended use of the public key, whether for encrypting data or verifying the signature on data." + }, + "key_ops": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The operation(s) for which the key is intended to be used." + }, + "alg": { + "type": "string", + "description": "The algorithm intended for use with the key." + }, + "kid": { + "type": "string", + "description": "The key ID used to match a specific key." + }, + "x5u": { + "type": "string", + "description": "The X.509 URL that refers to a resource for an X.509 public key certificate or certificate chain." + }, + "x5c": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The X.509 Certificate Chain of one or more PKIX certificates. The PKIX certificate containing the key value MUST be the first certificate." + }, + "x5t": { + "type": "string", + "description": "The X.509 Certificate SHA-1 Thumbprint (a.k.a. digest) of the DER encoding of an X.509 certificate." + }, + "x5t256": { + "type": "string", + "description": "The X.509 Certificate SHA-256 Thumbprint (a.k.a. digest) of the DER encoding of an X.509 certificate." + }, + "p2s": { + "type": "string", + "description": "The salt input value for PBES2 key encryption, which is used as part of the PBKDF2 salt value." + }, + "p2c": { + "type": "string", + "description": "The PBKDF2 iteration count for PBES2 key encryption, represented as a positive JSON integer. The iteration count adds computational expense, ideally compounded by the possible range of keys introduced by the salt. A minimum iteration count of 1000 is RECOMMENDED." + }, + "crv": { + "type": "string", + "description": "The cryptographic curve used for an Elliptic Curve public key." + }, + "x": { + "type": "string", + "description": "The x coordinate of the point for an Elliptic Curve public key." 
+ }, + "y": { + "type": "string", + "description": "The y coordinate of the point for an Elliptic Curve public key." + }, + "e": { + "type": "string", + "description": "The exponent value for an RSA public key." + }, + "exp": { + "type": "string", + "description": "The exponent value for an RSA public key." + }, + "n": { + "type": "string", + "description": "The modulus value for an RSA public key." + }, + "mod": { + "type": "string", + "description": "The modulus value for an RSA public key." + }, + "d": { + "type": "string", + "description": "The private key value for an Elliptic Curve private key OR the private exponent value for an RSA private key." + }, + "p": { + "type": "string", + "description": "The first prime factor for an RSA private key." + }, + "q": { + "type": "string", + "description": "The second prime factor for an RSA private key." + }, + "dp": { + "type": "string", + "description": "The Chinese Remainder Theorem (CRT) exponent of the first factor for an RSA private key." + }, + "dq": { + "type": "string", + "description": "The CRT exponent of the second factor for an RSA private key." + }, + "qi": { + "type": "string", + "description": "The CRT coefficient of the second factor for an RSA private key." + }, + "oth": { + "description": "An array of information about any third and subsequent primes, should they exist.", + "type": "array", + "items": { + "type": "object", + "properties": { + "r": { + "type": "string", + "description": "The prime factor." + }, + "d": { + "type": "string", + "description": "The factor CRT exponent of the corresponding prime factor." + }, + "t": { + "type": "string", + "description": "The factor CRT coefficient of the corresponding prime factor." + } + } + } + }, + "iv": { + "type": "string", + "description": "The base64url-encoded representation of the 96-bit Initialization Vector value used for the AES GCM key encryption operation." 
+ }, + "tag": { + "type": "string", + "description": "The base64url-encoded representation of the 128-bit Authentication Tag value resulting from the AES GCM key encryption operation." + }, + "k": { + "type": "string", + "description": "The key value of the symmetric (or other single-valued) key." + }, + "enc": { + "type": "string", + "description": "The encryption algorithm for JWE." + }, + "epk": { + "type": "object", + "description": "The ephemeral public key value created by the originator for use in ECDH-ES key agreement algorithms." + }, + "apu": { + "type": "string", + "description": "The agreement PartyUInfo for ECDH-ES key agreement algorithms, containing information about the producer." + }, + "apv": { + "type": "string", + "description": "The agreement PartyVInfo for ECDH-ES key agreement algorithms." + } + } + }, + "JWKInline": { + "required": [ + "type", + "keys" + ], + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "INLINE" + ] + }, + "keys": { + "type": "array", + "description": "The JSON Web Keys.\n", + "items": { + "$ref": "#/components/schemas/JWK" + }, + "example": [ + { + "k": "ZmFudGFzdGljand0", + "kty": "oct", + "kid": 1 + } + ] + } + }, + "description": "Inline contents of a JWK JSON file.\n" + }, + "JWKRemoteFile": { + "required": [ + "uri", + "type" + ], + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "REMOTE_FILE" + ] + }, + "uri": { + "type": "string" + }, + "cacheExpire": { + "pattern": "^[0-9]+[h|m|s]{1}$", + "type": "string", + "description": "The length of time for which to cache the remote file.\nNGINX will retrieve the file from the source URI when the cache time expires.\n", + "example": "10h" + } + } + }, + "IdentityProviderAPIKey": { + "required": [ + "type" + ], + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "API_KEY" + ] + }, + "apiKey": { + "type": "string", + "description": "If left empty, a key will automatically be 
generated.\nThe apikey must contain only alphanumeric characters, underscores, and hyphens.\nThe length of the apikey must be between 8 - 256 characters.\n", + "example": "ADv-2ZheQnL_jVx5klhQ39" + } + } + }, + "CertMetadata": { + "type": "object", + "description": "Public certificate metadata.", + "required": [ + "authorityKeyIdentifier", + "commonName", + "expired", + "expiry", + "issuer", + "publicKeyType", + "serialNumber", + "signatureAlgorithm", + "subject", + "subjectAlternativeName", + "subjectKeyIdentifier", + "thumbprint", + "thumbprintAlgorithm", + "validFrom", + "validTo", + "version" + ], + "properties": { + "authorityKeyIdentifier": { + "type": "string", + "example": "2B D0 69 47 94 76 09 FE F4 6B 8D 2E 40 A6 F7 47 4D 7F 08 5E", + "description": "The identifier of the signing authority for the certificate." + }, + "commonName": { + "type": "string", + "example": "www.myapp.com", + "description": "The Common Name (CN) for the certificate. This is typically a Fully Qualified Domain Name (FQDN), and must be the same as the web address users access when connecting to a web site." + }, + "expired": { + "type": "boolean", + "example": false, + "description": "Indicates the expiration status of the certificate." + }, + "expiry": { + "type": "integer", + "example": 35500034, + "description": "The number of seconds until the certificate will expire." + }, + "issuer": { + "type": "string", + "example": "DigiCert Class 3 Extended Validation SSL SGC CA.", + "description": "Identifies the entity who signed and issued the certificate." + }, + "publicKeyType": { + "type": "string", + "example": "RSA (2048 Bits)", + "description": "Identifies the encryption algorithm used to create the public key for the ceritficate." + }, + "serialNumber": { + "type": "string", + "example": "16469416336579571270", + "description": "A unique identifier for the certificate." 
+ }, + "signatureAlgorithm": { + "type": "string", + "example": "SHA-256", + "description": "Identifies the algorithm used to sign the certificate." + }, + "subject": { + "type": "string", + "example": "www.myapp.com", + "description": "Contains the Distinguished Name (DN) information for the certificate." + }, + "subjectAlternativeName": { + "type": "string", + "example": "DNS Name=static.xxxx", + "description": "Defines additional identifies bound to the subject of the certificate. For example, the DNS name is used to add addtional domain names to a certificate." + }, + "subjectKeyIdentifier": { + "type": "string", + "example": "31 EA 76 A9 23 74 A5 DF D4 FD EE A0 C1 A6 9E C6 11 0E 11 EC", + "description": "A hash value of the SSL certificate that can be used to identify certificates that contain a particular public key." + }, + "thumbprint": { + "type": "string", + "example": "E6 A7 87 96 E0 C7 A3 E5 43 78 35 CA 16 78 5B 48 5A A9 DD C4 5C CD 0A 65 AA 89 33 E3 C3 D0 89 71", + "description": "A hash to ensure that the certificate has not been modified." + }, + "thumbprintAlgorithm": { + "type": "string", + "example": "SHA-1", + "description": "Defines the algorithm used to hash the certificate." + }, + "validFrom": { + "type": "string", + "example": "2019-07-29T09:12:33.001Z", + "description": "The start of the validity period for the certificate." + }, + "validTo": { + "type": "string", + "example": "2029-07-29T09:12:33.001Z", + "description": "The end of the validity period for the certificate." + }, + "version": { + "type": "integer", + "example": 3, + "description": "The version of the certificate, typically 3 for X.509 certificates." 
+ } + } + }, + "CertDesiredState": { + "type": "object", + "discriminator": { + "propertyName": "type", + "mapping": { + "PEM": "#/components/schemas/PEM", + "PKCS12": "#/components/schemas/PKCS12", + "REMOTE_FILE": "#/components/schemas/RemoteFile" + } + }, + "oneOf": [ + { + "$ref": "#/components/schemas/PEM" + }, + { + "$ref": "#/components/schemas/PKCS12" + }, + { + "$ref": "#/components/schemas/RemoteFile" + } + ] + }, + "CertList": { + "type": "object", + "required": [ + "items" + ], + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CertStatus" + } + } + } + }, + "CertStatus": { + "type": "object", + "required": [ + "metadata", + "desiredState", + "currentStatus" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/CertDesiredState" + }, + "currentStatus": { + "$ref": "#/components/schemas/CertCurrentStatus" + } + } + }, + "CertCurrentStatus": { + "type": "object", + "description": "'Shows the current status of the certificate.\n\nWhen any certificates have expired, the Certs service sets `state.selfConfigState.isConfigured` and `state.selfConfigState.isError` to `true`. The service will also add a value to the conditons array with the type \"expiration\" and a message that shows when the first certificate will expire. 
For example, `conditions: [{type: \"expiration\", message: \"Certificate www.example.com will expire in 29 days.\"}])`'\n", + "required": [ + "state", + "certMetadata", + "type" + ], + "properties": { + "type": { + "type": "string" + }, + "privateKey": { + "type": "string" + }, + "publicCert": { + "type": "string" + }, + "data": { + "type": "string" + }, + "password": { + "type": "string" + }, + "caCerts": { + "type": "array", + "items": { + "type": "string" + } + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "certMetadata": { + "type": "array", + "description": "Public certificate metadata.", + "items": { + "$ref": "#/components/schemas/CertMetadata" + } + } + } + }, + "Cert": { + "type": "object", + "description": "Contains the certificate to upload.", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/CertDesiredState" + } + } + }, + "PEM": { + "type": "object", + "description": "Defines a PEM-formatted certificate that contains a key and certificates.\n\nBecause JSON does not support multi-line strings, you must replace binary line feeds with ASCII line feeds (`\\n`).\nThe private key data will be redacted in the response for all get and list requests.\n", + "required": [ + "privateKey", + "publicCert", + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "PEM" + ] + }, + "password": { + "type": "string", + "example": "myPa$$w0rd", + "description": "The passphrase to use to decrypt the private key. Required if the private key is encrypted." 
+ }, + "privateKey": { + "type": "string", + "example": "-----BEGIN PRIVATE KEY-----\\n MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBALSQBtRafNJtTqN0\\n nYiZq6TZUsHjfG2R9PlK6jsvno9O6amN96Al6ZSTTDjhr4VU7/RJ0p/cisiCboCX\\n 4cCq6lFKpIpeZJI=\\n -----END PRIVATE KEY-----", + "description": "The private key used to sign the public certificate.\n\nBecause JSON does not support multi-line strings, you must replace binary line feeds with ASCII line feeds (`\\n`). The private key data will be redacted in the response for all get and list requests.\n" + }, + "publicCert": { + "type": "string", + "example": "-----BEGIN CERTIFICATE-----\\n MIICpzCCAhACCQDkjx7mP9cuRjANBgkqhkiG9w0BAQsFADCBlzELMAkGA1UEBhMC\\n MiJVGawyxDzBm2UhzNOE0ABHfjAgM6PAYmtMhhQawk6bmttXYhJeqhLSji4LEj5d\\n Z4FmXQ5rWM0RWBs=\\n -----END CERTIFICATE-----", + "description": "The end-entity certificate, in PEM format.\n\nBecause JSON does not support multi-line strings, you must replace binary line feeds with ASCII line feeds (`\\n`).\n" + }, + "caCerts": { + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "-----BEGIN CERTIFICATE-----\\n MIIE+zCCBGSgAwIBAgICAQ0wDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1Zh\\n WBsUs5iB0QQeyAfJg594RAoYC5jcdnplDQ1tgMQLARzLrUc+cb53S8wGd9D0Vmsf\\n SxOaFIqII6hR8INMqzW/Rn453HWkrugp++85j09VZw==\\n -----END CERTIFICATE-----" + ], + "description": "An optional list of intermediate certificates in PEM format that are used to validate the public certificate.\n\nBecause JSON does not support multi-line strings, you must replace binary line feeds with ASCII line feeds (`\\n`).\n" + } + } + }, + "PKCS12": { + "type": "object", + "description": "Defines a cert with key and certificates encoded in PKCS12 format.", + "required": [ + "data", + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "PKCS12" + ] + }, + "data": { + "type": "string", + "example": 
"MIIGoQIBAzCCBmcGCSqGSIb3DQEHAaCCBlgEggZUMIIGUDCCA08GCSqGSIb3DQEHBqCCA0AwggM8AgEAMIIDNQYJKoZIhvcNAQcBMBwGCiqGSIb3DQEMAQYwDgQIe7ZblBoEW3QCAggAgIIDCCgLEvzp9n69QbpGT0MDEwITAJBgUrDgMCGgUABBQJs6ZgeAMcxVLrq1hU+TlUOArMuQQIGK59vCBn0wECAggA", + "description": "A base-64-encoded string that contains a private key, a public certificate, and, optionally, other intermediate certificates." + }, + "password": { + "type": "string", + "example": "myPa$$w0rd", + "description": "The password to use to decrypt PKCS12 data." + } + } + }, + "RemoteFile": { + "type": "object", + "description": "Define a Cert resource by providing references to remote files.\n\n> **Note:** These are file path references only. The system can not validate the file contents or extract the certificate metadata. Providing a PEM or PKCS12 certificate is recommended.\n", + "required": [ + "privateKey", + "publicCert", + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "REMOTE_FILE" + ] + }, + "privateKey": { + "type": "string", + "example": "/certs/www.example.com/example.key", + "description": "The path to the private key file." + }, + "publicCert": { + "type": "string", + "example": "/certs/www.example.com/example.crt", + "description": "The path to the certificate bundle file. The file must contain the public certificate and may contain additional intermediate certificates." 
+ } + } + }, + "GatewayName": { + "type": "string" + }, + "GatewayList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Gateway" + } + } + } + }, + "GZip": { + "properties": { + "isEnabled": { + "type": "boolean", + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip" + } + }, + "buffers": { + "type": "object", + "properties": { + "number": { + "type": "integer" + }, + "size": { + "type": "string", + "pattern": "^[0-9]+[k|K|m|M]{1}$" + } + }, + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_buffers" + } + }, + "level": { + "type": "integer", + "minimum": 1, + "maximum": 9, + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_comp_level" + } + }, + "disabledUserAgents": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "uniqueItems": true, + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_disable" + } + }, + "httpVersion": { + "type": "string", + "pattern": "^[1-3]{1}\\.[0-1]{1}$", + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_http_version" + } + }, + "minLength": { + "type": "integer", + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_min_length" + } + }, + "proxied": { + "type": "string", + "enum": [ + "DISABLED", + "EXPIRED", + "NOCACHE", + "NOSTORE", + "PRIVATE", + "NOLASTMODIFIED", + "NOETAG", + "AUTH", + "ANY" + ], + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_proxied" + } + }, + "mimeTypes": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "uniqueItems": true, + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_types" + } + }, + "vary": { + "type": "string", + "enum": [ + "DISABLED", + "ENABLED" + ], + 
"externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_vary" + } + }, + "static": { + "type": "string", + "enum": [ + "DISABLED", + "ENABLED", + "ALWAYS" + ], + "externalDocs": { + "url": "http://nginx.org/en/docs/http/ngx_http_gzip_static_module.html#gzip_static" + } + } + } + }, + "Compression": { + "description": "Gzip compression settings.", + "type": "object", + "properties": { + "gzip": { + "$ref": "#/components/schemas/GZip" + } + } + }, + "GatewayWebState": { + "description": "Non-ingress settings in a Gateway that apply only to Web Components.", + "type": "object", + "properties": { + "errorSetRef": { + "$ref": "#/components/schemas/ResourceRef" + }, + "compression": { + "$ref": "#/components/schemas/Compression" + } + } + }, + "NginxDirective": { + "x-f5-experimental": true, + "required": [ + "directive" + ], + "properties": { + "directive": { + "type": "string", + "description": "The name of the NGINX directive. For a list of NGINX Directives, refer to [the NGINX documentation](http://nginx.org/en/docs/dirindex.html).\n" + }, + "args": { + "type": "array", + "description": "Directive arguments.", + "items": { + "type": "string" + } + }, + "block": { + "type": "array", + "description": "The directives to include within a block directive or context.", + "items": { + "$ref": "#/components/schemas/NginxDirective" + } + } + } + }, + "NginxConf": { + "x-f5-experimental": true, + "required": [ + "path", + "op", + "block" + ], + "properties": { + "path": { + "type": "string", + "description": "Path is a JSONPath expression. 
See [goessner.net/articles/JsonPath](https://goessner.net/articles/JsonPath/) for more information on JSONPath.\n", + "example": "$..[?(@.directive=='server')]" + }, + "op": { + "type": "string", + "description": "Indicates where to insert the block of directives in relation to the directives\nselected by the given path.\n\n* `APPEND` - Add after the selected directives\n\n* `APPEND_CHILD` - Add after the last directive in the selected directives' block\n\n* `PREPEND` - Add before the selected directives\n\n* `PREPEND_CHILD` - Add before the first directive in the selected directives' block\n", + "enum": [ + "APPEND", + "APPEND_CHILD", + "PREPEND", + "PREPEND_CHILD" + ] + }, + "block": { + "type": "array", + "description": "Block contains a list of directives that you want to add to the NGINX configuration.\n", + "items": { + "$ref": "#/components/schemas/NginxDirective" + } + } + } + }, + "NginxConfs": { + "x-f5-experimental": true, + "description": "NGINX raw configuration", + "type": "array", + "items": { + "$ref": "#/components/schemas/NginxConf" + } + }, + "HA": { + "description": "Data path high availability settings", + "type": "object", + "properties": { + "isEnabled": { + "type": "boolean", + "description": "Enables or disables HA.\n\nWhen set to `true`, supports configuring instances in an active‑passive, high‑availability (HA) setup.\n\nTo configure the gateway on HA mode:\n - `keepalived` must be installed and configured on the desired instances.\n - At least one listen IP address must be specified in the `listenIps` section within `InstanceRefs`\n\n See the `listenIps` section for additional requirements for configuring HA.\n" + } + } + }, + "GatewayStateCommon": { + "description": "Non-ingress settings in a Gateway that apply to Web and TCP/UDP Components.", + "type": "object", + "properties": { + "nginxConfs": { + "$ref": "#/components/schemas/NginxConfs" + }, + "ha": { + "$ref": "#/components/schemas/HA" + } + } + }, + "ServiceConfigState": { + 
"type": "string", + "enum": [ + "ENABLED", + "DISABLED" + ], + "default": "DISABLED" + }, + "GatewaySocketSettings": { + "description": "Socket settings in a Gateway.", + "type": "object", + "properties": { + "setFib": { + "x-f5-experimental": true, + "type": "integer" + }, + "fastOpen": { + "x-f5-experimental": true, + "type": "integer" + }, + "acceptFilter": { + "x-f5-experimental": true, + "type": "string", + "enum": [ + "DATA_READY", + "HTTP_READY" + ] + }, + "deferred": { + "x-f5-experimental": true, + "$ref": "#/components/schemas/ServiceConfigState" + }, + "backlog": { + "x-f5-experimental": true, + "type": "integer", + "default": -1 + }, + "isIpv6Only": { + "x-f5-experimental": true, + "type": "boolean", + "default": false + }, + "reusePort": { + "x-f5-experimental": true, + "$ref": "#/components/schemas/ServiceConfigState" + }, + "tcpKeepAlive": { + "type": "object", + "properties": { + "state": { + "$ref": "#/components/schemas/ServiceConfigState" + }, + "idle": { + "type": "string", + "pattern": "^[0-9]+[h|m|s]{1}$" + }, + "interval": { + "type": "string", + "pattern": "^[0-9]+[h|m|s]{1}$" + }, + "count": { + "type": "integer" + } + } + }, + "receiveBufferSize": { + "type": "string", + "pattern": "^[0-9]+[k|K|m|M]{1}$" + }, + "sendBufferSize": { + "type": "string", + "pattern": "^[0-9]+[k|K|m|M]{1}$" + } + } + }, + "URIMatchMethod": { + "description": "Specifies how to determine a match between an incoming Web URI and configured Web URI.", + "type": "string", + "enum": [ + "PREFIX", + "REGEX", + "REGEX_CASE_SENSITIVE", + "SUFFIX", + "EXACT" + ], + "default": "PREFIX" + }, + "TLS": { + "description": "TLS settings applicable to URIs.", + "type": "object", + "required": [ + "certRef" + ], + "properties": { + "certRef": { + "$ref": "#/components/schemas/ResourceRef" + }, + "protocols": { + "type": "array", + "items": { + "type": "string", + "pattern": "TLSv1|TLSv1\\.[1-3]|SSLv2|SSLv3" + } + }, + "cipher": { + "type": "string", + "example": 
"ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP;" + }, + "preferServerCipher": { + "$ref": "#/components/schemas/ServiceConfigState" + }, + "sessionCache": { + "type": "string", + "enum": [ + "OFF", + "NONE", + "BUILTIN", + "SHARED" + ], + "default": "OFF" + } + } + }, + "IngressUri": { + "type": "object", + "properties": { + "matchMethod": { + "$ref": "#/components/schemas/URIMatchMethod" + }, + "tls": { + "$ref": "#/components/schemas/TLS" + }, + "serverPoolPort": { + "x-f5-experimental": true, + "description": "The port number used when the IngressUri is deployed to an Instance that's a member of a BIG-IP server pool.", + "type": "integer" + } + } + }, + "IngressUris": { + "type": "object", + "description": "Defines the URI in Gateways and Components. The URI has different requirements depending on where it is used.\n\nFor Web URIs in Gateways, `uris` must be a complete URI that follows the format `://host[:port]`;\nadditionally each URI can have a match method specified and an HTTPS URI can include TLS information.\n Examples:\n - `http://www.f5.com`\n - `https://www.f5.com`\n - `http://www.f5.com:8080`\n - `http://*.info.f5.com`\n\nFor Web URIs in Components, `uris` can be either a complete URI that follows the format `://host[:port][/path]`\nor a relative path that follows the format `/path[/...]`;\nadditionally each URI can have a match method specified and an HTTPS URI can include TLS information.\n Examples:\n - `/images`\n - `/*.jpg`\n - `/locations/us/wa*`\n - `http://www.f5.com:8080/sales`\n - `http://*.f5.com:5050/testing`\n\nFor TCP/UDP URIs in Gateways and Components,`uris` must be a complete URI that follows the format `://<*|IP>:`;\nadditionally a tcp+tls URI can include TLS information. 
Match method is not applicable to TCP/UDP URIs.\n Examples:\n - `tcp://192.168.1.1:12345`\n - `tcp+tls://192.168.1.1:12346`\n - `tcp://192.168.1.1:12345-12350`\n - `tcp://*:12345`\n - `udp://192.168.1.1:12345`\n - `udp://*:12345`\n\nIn a TCP/UDP Component, URIs can either all have a protocol of udp or a mix of TCP and tcp+tls.\n", + "additionalProperties": { + "description": "Provide the URI associated with the resource.", + "$ref": "#/components/schemas/IngressUri" + } + }, + "InstanceGroupRef": { + "x-f5-experimental": true, + "allOf": [ + { + "$ref": "#/components/schemas/ResourceRef" + }, + { + "type": "object", + "properties": { + "listenIps": { + "x-f5-experimental": true, + "description": "The list of Listen IP addresses.\nSets the BIG-IP virtual address(es) on which the server listens for and accepts requests.\n", + "type": "array", + "items": { + "type": "string", + "pattern": "^(?:(?:25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\.){3}(?:25[0-5]|2[0-4]\\d|[01]?\\d\\d?)$", + "example": "1.1.1.1" + } + } + } + } + ] + }, + "Placement": { + "description": "Instances that have NGINX configuration applied corresponding to the Gateway and associated Components settings.", + "type": "object", + "properties": { + "instanceRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/InstanceRef" + } + }, + "instanceGroupRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/InstanceGroupRef" + } + } + } + }, + "GatewayIngressCommon": { + "description": "Ingress settings in a Gateway that apply to Web and TCP/UDP Components.", + "allOf": [ + { + "$ref": "#/components/schemas/GatewaySocketSettings" + }, + { + "type": "object", + "required": [ + "uris", + "placement" + ], + "properties": { + "uris": { + "$ref": "#/components/schemas/IngressUris" + }, + "tls": { + "$ref": "#/components/schemas/TLS" + }, + "placement": { + "$ref": "#/components/schemas/Placement" + } + } + } + ] + }, + "HeaderMatchMethod": { + "type": "string", + "enum": [ + "PREFIX", + 
"REGEX", + "REGEX_CASE_SENSITIVE", + "SUFFIX", + "EXACT" + ], + "default": "REGEX" + }, + "IngressHeader": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "nameMatchMethod": { + "$ref": "#/components/schemas/HeaderMatchMethod" + }, + "value": { + "type": "string" + }, + "valueMatchMethod": { + "$ref": "#/components/schemas/HeaderMatchMethod" + } + } + }, + "WebIngressCommon": { + "description": "Ingress settings in a Gateway and Component that apply only to Web Components.", + "type": "object", + "properties": { + "methods": { + "description": "Specifies the HTTP method to use in requests.", + "type": "array", + "items": { + "type": "string", + "enum": [ + "POST", + "GET", + "PUT", + "DELETE", + "PATCH", + "HEAD", + "TRACE", + "OPTIONS", + "CONNECT" + ] + } + }, + "clientMaxBodySize": { + "description": "Sets the maximum allowed size of the client request body, specified in the “Content-Length” request header field.", + "type": "string", + "pattern": "^[0-9]+[k|K|m|M]{1}$", + "deprecated": true, + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size" + } + }, + "headers": { + "description": "Specifies the match method for headers to be used in requests.", + "x-f5-experimental": true, + "type": "array", + "items": { + "$ref": "#/components/schemas/IngressHeader" + } + }, + "http2": { + "description": "Enable or disable HTTP/2 connections on the port. 
Normally, for this to work the `ssl` parameter should be specified as well,\nbut NGINX can also be configured to accept HTTP/2 connections without SSL.\nPossible values are `ENABLED` or `DISABLED`.\n", + "x-f5-experimental": true, + "$ref": "#/components/schemas/ServiceConfigState", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#listen" + } + }, + "spdy": { + "description": "Enables or disables acceptance of the SPDY connections on the specified port.\nNormally, for this to work the `ssl` parameter should be specified as well,\nbut NGINX can also be configured to accept SPDY connections without SSL. Possible values are `ENABLED` or `DISABLED`.\n", + "x-f5-experimental": true, + "$ref": "#/components/schemas/ServiceConfigState", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#listen" + } + }, + "proxyProtocol": { + "description": "Enables or disables the proxy protocol for all connections accepted on the specified port.\nThe `proxy protocol` enables NGINX and NGINX Plus to receive client connection information passed through proxy servers and load balancers,\nsuch as HAproxy and Amazon Elastic Load Balancer (ELB). 
The possible values are `ENABLED` or `DISABLED`.\n", + "x-f5-experimental": true, + "$ref": "#/components/schemas/ServiceConfigState", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#listen" + } + }, + "notFoundStatusCode": { + "x-f5-experimental": true, + "type": "integer", + "default": 404 + }, + "headersHashBucketSize": { + "description": "Sets the bucket size for hash tables used by the `proxy_hide_header` and `proxy_set_header` directives.", + "type": "integer", + "minimum": 1, + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_bucket_size" + } + }, + "headersHashMaxSize": { + "description": "Sets the maximum size of hash tables used by the `proxy_hide_header` and `proxy_set_header` directives.", + "type": "integer", + "minimum": 1, + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_max_size" + } + } + } + }, + "GatewayWebIngressClient": { + "description": "Non-buffer settings in a Gateway applicable to Web client requests.", + "type": "object", + "properties": { + "bodyInFileOnly": { + "description": "Determines whether NGINX Controller should save the entire client request body into a file.", + "type": "string", + "enum": [ + "ENABLED", + "DISABLED", + "CLEAN" + ], + "default": "DISABLED", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_in_file_only" + } + }, + "bodyTimeout": { + "description": "Defines a timeout for reading the client request body.", + "type": "string", + "pattern": "^[0-9]{1,12}[hms]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_timeout" + } + }, + "headerTimeout": { + "description": "Defines a timeout for reading the client request header.", + "type": "string", + "pattern": "^[0-9]{1,12}[hms]?$", + "externalDocs": { + "url": 
"https://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_timeout" + } + }, + "maxBodySize": { + "description": "Sets the maximum allowed size of the client request body, specified in the “Content-Length” request header field.\n\nDisables checking of client request body size when set to 0.\n", + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size" + } + } + } + }, + "WebIngressBuffersCommon": { + "description": "Buffer settings common to a Gateway and Component applicable to web client requests.", + "type": "object", + "properties": { + "clientBodyBufferingIsEnabled": { + "description": "Enables or disables buffering of a client request body.", + "type": "boolean", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering" + } + }, + "clientBodyBufferSize": { + "description": "Sets the buffer size for reading the client request body.", + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size" + } + }, + "clientBodyInSingleBuffer": { + "description": "Determines whether NGINX Controller should save the entire client request body in a single buffer.", + "type": "string", + "enum": [ + "ENABLED", + "DISABLED" + ], + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_in_single_buffer" + } + } + } + }, + "GatewayWebIngress": { + "description": "Ingress settings in a Gateway that apply only to Web Components.", + "type": "object", + "properties": { + "client": { + "$ref": "#/components/schemas/GatewayWebIngressClient" + }, + "allowUnderscoresInHeaders": { + "type": "boolean", + "default": false, + "description": "Allows the use of underscores in client request header fields.\n\nWhen set to `disabled`, request headers with names that contain 
underscores are considered invalid and are ignored.\n" + }, + "buffers": { + "allOf": [ + { + "$ref": "#/components/schemas/WebIngressBuffersCommon" + }, + { + "type": "object", + "properties": { + "clientHeaderBufferSize": { + "description": "Sets the buffer size for reading the client request header.", + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_buffer_size" + } + } + } + } + ] + } + } + }, + "GatewayIngress": { + "description": "Ingress settings in a Gateway.", + "allOf": [ + { + "$ref": "#/components/schemas/GatewayIngressCommon" + }, + { + "$ref": "#/components/schemas/WebIngressCommon" + }, + { + "$ref": "#/components/schemas/GatewayWebIngress" + } + ] + }, + "GatewayDesiredState": { + "description": "The desired gateway settings that the user wants in the configuration on NGINX instances referenced by this Gateway.", + "allOf": [ + { + "$ref": "#/components/schemas/GatewayWebState" + }, + { + "$ref": "#/components/schemas/GatewayStateCommon" + }, + { + "type": "object", + "required": [ + "ingress" + ], + "properties": { + "ingress": { + "$ref": "#/components/schemas/GatewayIngress" + } + } + } + ] + }, + "ErrorSetList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ErrorSet" + } + } + } + }, + "ErrorSet": { + "type": "object", + "description": "Defines the set of error messages to be returned for HTTP errors.", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/ErrorSetDesiredState" + }, + "currentStatus": { + "$ref": "#/components/schemas/ErrorSetCurrentStatus" + } + } + }, + "ErrorSetDesiredState": { + "type": "object", + "properties": { + "errorCodes": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ErrorCode" + } + } + 
} + }, + "ErrorSetCurrentStatus": { + "type": "object", + "properties": { + "errorCodes": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ErrorCode" + } + } + } + }, + "ErrorCode": { + "type": "object", + "required": [ + "code", + "message" + ], + "properties": { + "code": { + "type": "integer", + "minimum": 400, + "maximum": 599, + "example": 404 + }, + "message": { + "type": "string", + "example": "{\"status\":404,\"message\":\"Resource not found\"}" + } + } + }, + "PublishedAPIList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/PublishedAPI" + } + } + } + }, + "PublishedAPI": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "currentStatus": { + "$ref": "#/components/schemas/PublishedAPICurrentStatus" + }, + "desiredState": { + "$ref": "#/components/schemas/PublishedAPIDesiredState" + } + } + }, + "PublishedAPICurrentStatus": { + "type": "object", + "properties": { + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "basePath": { + "type": "string", + "default": "/" + }, + "stripWorkloadBasePath": { + "type": "boolean", + "default": true + }, + "componentRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "apiDefinitionVersionRef": { + "description": "Reference to the Version of the API Definition.\n", + "$ref": "#/components/schemas/ResourceRef" + }, + "gatewayRefs": { + "type": "array", + "description": "Reference to the Gateway associated with the Published API.", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "devportalRefs": { + "type": "array", + "description": "Reference to the Dev Portal associated with the Published API.", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + } + }, + "PublishedAPIDesiredState": { + "type": "object", + "required": [ + "apiDefinitionVersionRef", + "gatewayRefs" + ], + "properties": 
{ + "basePath": { + "type": "string", + "default": "/" + }, + "stripWorkloadBasePath": { + "type": "boolean", + "default": true + }, + "apiDefinitionVersionRef": { + "description": "Reference to the Version of the API Definition.\n", + "$ref": "#/components/schemas/ResourceRef" + }, + "gatewayRefs": { + "type": "array", + "description": "Reference to the Gateway associated with the Published API.", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "devportalRefs": { + "type": "array", + "description": "Reference to the Dev Portal associated with the Published API.", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + } + }, + "ComponentList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Component" + } + } + } + }, + "ComponentStateCommon": { + "type": "object", + "description": "Settings common to Web & TCP/UDP Components.", + "properties": { + "nginxConfs": { + "$ref": "#/components/schemas/NginxConfs" + } + } + }, + "ComponentIngressCommon": { + "description": "Ingress settings common to Web and TCP/UDP components.", + "type": "object", + "required": [ + "uris" + ], + "properties": { + "uris": { + "$ref": "#/components/schemas/IngressUris" + }, + "gatewayRefs": { + "description": "Reference(s) to existing Gateway resource(s).", + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "tls": { + "$ref": "#/components/schemas/TLS" + } + } + }, + "ComponentWebIngressClient": { + "description": "Non-buffer settings in a Component applicable to Web client requests.", + "type": "object", + "properties": { + "bodyInFileOnly": { + "description": "Determines whether NGINX Controller should save the entire client request body into a file.", + "type": "string", + "enum": [ + "ENABLED", + "DISABLED", + "CLEAN" + ], + "default": "DISABLED", + "externalDocs": { + "url": 
"https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_in_file_only" + } + }, + "bodyTimeout": { + "description": "Defines a timeout for reading the client request body.", + "type": "string", + "pattern": "^[0-9]{1,12}[hms]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_timeout" + } + }, + "maxBodySize": { + "description": "Sets the maximum allowed size of the client request body, specified in the “Content-Length” request header field.\n\nDisables checking of client request body size when set to 0.\n", + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size" + } + } + } + }, + "ComponentWebIngress": { + "description": "Ingress settings in a Web Component.", + "allOf": [ + { + "$ref": "#/components/schemas/ComponentIngressCommon" + }, + { + "$ref": "#/components/schemas/WebIngressCommon" + }, + { + "type": "object", + "properties": { + "client": { + "$ref": "#/components/schemas/ComponentWebIngressClient" + }, + "buffers": { + "$ref": "#/components/schemas/WebIngressBuffersCommon" + } + } + } + ] + }, + "MonitoringCommon": { + "description": "Monitor settings common to Web and TCP/UDP.", + "type": "object", + "properties": { + "defaultState": { + "type": "string", + "enum": [ + "HEALTHY", + "UNHEALTHY" + ], + "default": "HEALTHY" + }, + "interval": { + "type": "integer", + "minimum": 1, + "default": 5 + }, + "consecutiveSuccessThreshold": { + "type": "integer", + "minimum": 1, + "default": 1 + }, + "consecutiveFailureThreshold": { + "type": "integer", + "minimum": 1, + "default": 1 + }, + "port": { + "type": "integer", + "minimum": 1, + "maximum": 65535 + } + } + }, + "MonitorResponseStatus": { + "type": "object", + "properties": { + "range": { + "type": "object", + "properties": { + "startCode": { + "type": "integer", + "minimum": 100, + "maximum": 599 + }, + "endCode": { + "type": 
"integer", + "minimum": 100, + "maximum": 599 + } + } + }, + "codes": { + "type": "array", + "items": { + "type": "integer", + "minimum": 100, + "maximum": 599 + } + }, + "match": { + "type": "boolean", + "default": true + } + } + }, + "MonitorResponseContent": { + "type": "object", + "properties": { + "content": { + "type": "string" + }, + "match": { + "type": "boolean", + "default": true + } + } + }, + "WebMonitorResponse": { + "description": "Settings that define successful responses to a Web monitor.", + "type": "object", + "properties": { + "status": { + "$ref": "#/components/schemas/MonitorResponseStatus" + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/MonitorResponseContent" + } + }, + "body": { + "$ref": "#/components/schemas/MonitorResponseContent" + } + } + }, + "WebMonitoring": { + "description": "Health monitor settings across all workload groups in a Web Component.", + "allOf": [ + { + "$ref": "#/components/schemas/MonitoringCommon" + }, + { + "type": "object", + "properties": { + "uri": { + "description": "URI containing the relative path that the monitor probe is sent to; the host is specified in the URI in the workload group.", + "type": "string", + "default": "/" + }, + "response": { + "description": "Settings that define successful responses to a Web monitor.", + "$ref": "#/components/schemas/WebMonitorResponse" + } + } + } + ] + }, + "RoundRobinLB": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "ROUND_ROBIN" + ] + } + } + }, + "IPHashLB": { + "type": "object", + "description": "IP Hash Load Balancing only applicable to Web Components.", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "IPHASH" + ] + } + } + }, + "LeastConnLB": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "LEAST_CONNECTIONS" + ] + } + } + }, + 
"HashLBMethod": { + "type": "object", + "required": [ + "type", + "userKey" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "HASH" + ] + }, + "userKey": { + "type": "string" + }, + "consistentHash": { + "$ref": "#/components/schemas/ServiceConfigState" + } + } + }, + "LeastTimeLBMethod": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "LEAST_TIME" + ] + }, + "latencyParameter": { + "type": "string", + "description": "Values applicable to a Web Component are: [HEADER, LAST_BYTE, LAST_BYTE_INFLIGHT];\nValues applicable to a TCP/UDP Component are: [CONNECT, FIRST_BYTE, LAST_BYTE, LAST_BYTE_INFLIGHT].\nThe default value is used for a web Component; there is no default for a TCP/UDP Component.\n", + "enum": [ + "HEADER", + "CONNECT", + "FIRST_BYTE", + "LAST_BYTE", + "LAST_BYTE_INFLIGHT" + ], + "default": "HEADER" + } + } + }, + "RandomLBMethod": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "RANDOM" + ] + }, + "twoServerLBMethod": { + "type": "string", + "description": "Values applicable to a Web Component are: [LEAST_CONNECTIONS, LEAST_TIME_HEADER, LEAST_TIME_LAST_BYTE];\nValues applicable to a TCP/UDP Component are: [LEAST_CONNECTIONS, LEAST_TIME_CONNECT, LEAST_TIME_FIRST_BYTE, LEAST_TIME_LAST_BYTE].\n", + "enum": [ + "LEAST_CONNECTIONS", + "LEAST_TIME_HEADER", + "LEAST_TIME_CONNECT", + "LEAST_TIME_FIRST_BYTE", + "LEAST_TIME_LAST_BYTE" + ] + } + } + }, + "LoadBalancingMethod": { + "discriminator": { + "propertyName": "type", + "mapping": { + "ROUND_ROBIN": "#/components/schemas/RoundRobinLB", + "IPHASH": "#/components/schemas/IPHashLB", + "LEAST_CONNECTIONS": "#/components/schemas/LeastConnLB", + "HASH": "#/components/schemas/HashLBMethod", + "LEAST_TIME": "#/components/schemas/LeastTimeLBMethod", + "RANDOM": "#/components/schemas/RandomLBMethod" + } + }, + "oneOf": [ + { + "$ref": 
"#/components/schemas/RoundRobinLB" + }, + { + "$ref": "#/components/schemas/IPHashLB" + }, + { + "$ref": "#/components/schemas/LeastConnLB" + }, + { + "$ref": "#/components/schemas/HashLBMethod" + }, + { + "$ref": "#/components/schemas/LeastTimeLBMethod" + }, + { + "$ref": "#/components/schemas/RandomLBMethod" + } + ] + }, + "DNSServiceDiscovery": { + "required": [ + "servers" + ], + "properties": { + "servers": { + "type": "array", + "description": "Array of DNS servers. Possible options are:\n- An IPv4 address with an optional port number.\n Port 53 is used if not specified.\n For example, \"10.1.1.1\", \"10.1.1.1:5353\".\n- An IPv6 address with an optional port number.\n Port 53 is used if not specified.\n For example, \"[2001::1]\", \"[2001::1]:5353\",\n- Fully qualified domain name (FQDN). ASCII characters only.\n NGINX uses the OS name server configuration\n to identify the IP addresses of the DNS servers to use.\n", + "items": { + "type": "string", + "pattern": "^(?:(?:(?:(?:25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\.){3}(?:25[0-5]|2[0-4]\\d|[01]?\\d\\d?)(?::(?:[1-9]|[1-5]?[0-9]{2,4}|6[1-4][0-9]{3}|65[1-4][0-9]{2}|655[1-2][0-9]|6553[1-5]))?$)|(?:\\[\\s*(?:(?:(?:[0-9a-fA-F]{1,4}:){7}(?:[0-9a-fA-F]{1,4}|:))|(?:(?:[0-9a-fA-F]{1,4}:){6}(?::[0-9a-fA-F]{1,4}|(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3})|:))|(?:(?:[0-9a-fA-F]{1,4}:){5}(?:(?:(?::[0-9a-fA-F]{1,4}){1,2})|:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3})|:))|(?:(?:[0-9a-fA-F]{1,4}:){4}(?:(?:(?::[0-9a-fA-F]{1,4}){1,3})|(?:(?::[0-9a-fA-F]{1,4})?:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(?:(?:[0-9a-fA-F]{1,4}:){3}(?:(?:(?::[0-9a-fA-F]{1,4}){1,4})|(?:(?::[0-9a-fA-F]{1,4}){0,2}:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(?:(?:[0-9a-fA-F]{1,4}:){2}(?:(?:(?::[0-9a-fA-F]{1,4}){1,5})|(?:(?::[0-9a-fA-F]{1,4}){0,3}:(?:(?:2
5[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(?:(?:[0-9a-fA-F]{1,4}:){1}(?:(?:(?::[0-9a-fA-F]{1,4}){1,6})|(?:(?::[0-9a-fA-F]{1,4}){0,4}:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(?::(?:(?:(?::[0-9a-fA-F]{1,4}){1,7})|(?:(?::[0-9a-fA-F]{1,4}){0,5}:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:)))(?:%.+)?](?::(?:[1-9]|[1-5]?[0-9]{2,4}|6[1-4][0-9]{3}|65[1-4][0-9]{2}|655[1-2][0-9]|6553[1-5]))?\\s*$)|(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[-0-9a-zA-Z]{0,61}[0-9a-zA-Z])?)*$))", + "minLength": 1, + "example": "10.1.1.1:5353" + }, + "minItems": 1, + "uniqueItems": true + }, + "ttl": { + "type": "string", + "pattern": "^[0-9]+[h|m|s]{1}$", + "description": "Overrides the TTL setting present in the DNS record.", + "example": "10s" + }, + "timeout": { + "type": "string", + "pattern": "^[0-9]+[h|m|s]{1}$", + "description": "Sets the timeout for domain name resolution.", + "example": "10s" + } + } + }, + "WorkloadGroupCommon": { + "description": "Settings common to Web and TCP/UDP workloadGroups.", + "type": "object", + "properties": { + "locationRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "loadBalancingMethod": { + "$ref": "#/components/schemas/LoadBalancingMethod" + }, + "dnsServiceDiscovery": { + "$ref": "#/components/schemas/DNSServiceDiscovery" + } + } + }, + "WebProxy": { + "description": "Proxy retry and timeout settings applicable to servers in a Web workloadGroup associated with a Component.", + "type": "object", + "properties": { + "nextUpstream": { + "description": "Specifies in which cases a request should be passed to the next server.", + "type": "array", + "items": { + "type": "string", + "enum": [ + "ERROR", + "TIMEOUT", + "INVALID_HEADER", + "HTTP_500", + "HTTP_502", + "HTTP_503", + "HTTP_504", + "HTTP_403", + "HTTP_404", + 
"HTTP_429", + "NON_IDEMPOTENT", + "OFF" + ] + } + }, + "connectTimeout": { + "description": "Defines a timeout for establishing a connection with a proxied server.", + "type": "string", + "pattern": "^[0-9]{1,12}[hms]?$" + }, + "sendTimeout": { + "description": "Sets a timeout for transmitting a request to the proxied server.", + "type": "string", + "pattern": "^[0-9]{1,12}[hms]?$" + } + } + }, + "SessionPersistenceCookie": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "COOKIE" + ] + }, + "srvID": { + "type": "string" + }, + "expireTime": { + "type": "string", + "pattern": "^[0-9]+[h|m|s]{1}$" + }, + "domain": { + "type": "string" + }, + "path": { + "type": "string" + } + } + }, + "SessionPersistenceRoute": { + "type": "object", + "required": [ + "type", + "routeInfoLocation" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "ROUTE" + ] + }, + "routeInfoLocation": { + "type": "string", + "enum": [ + "COOKIE", + "URI", + "BOTH" + ] + } + } + }, + "SessionPersistenceCookieLearn": { + "type": "object", + "required": [ + "type", + "create", + "lookup" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "COOKIE_LEARN" + ] + }, + "create": { + "type": "string", + "pattern": "^\\$.+" + }, + "lookup": { + "type": "string", + "pattern": "^\\$.+" + } + } + }, + "SessionPersistence": { + "description": "SessionPersistence settings in a Web workloadGroup.", + "discriminator": { + "propertyName": "type", + "mapping": { + "COOKIE": "#/components/schemas/SessionPersistenceCookie", + "ROUTE": "#/components/schemas/SessionPersistenceRoute", + "COOKIE_LEARN": "#/components/schemas/SessionPersistenceCookieLearn" + } + }, + "oneOf": [ + { + "$ref": "#/components/schemas/SessionPersistenceCookie" + }, + { + "$ref": "#/components/schemas/SessionPersistenceRoute" + }, + { + "$ref": "#/components/schemas/SessionPersistenceCookieLearn" + } + ] + }, + "WorkloadUri": { + "type": 
"object", + "properties": { + "weight": { + "type": "integer", + "minimum": 1, + "default": 1 + }, + "maxConns": { + "type": "integer", + "minimum": 0, + "default": 0 + }, + "maxFails": { + "type": "integer", + "minimum": 0, + "default": 1 + }, + "failTimeout": { + "x-f5-experimental": true, + "type": "string", + "default": "10s", + "pattern": "^[0-9]+[h|m|s]{1}$" + }, + "isBackup": { + "type": "boolean", + "default": false + }, + "isDown": { + "type": "boolean", + "default": false + }, + "route": { + "x-f5-experimental": true, + "type": "string" + }, + "srvService": { + "type": "string" + }, + "slowStart": { + "x-f5-experimental": true, + "type": "integer", + "minimum": 0, + "default": 0 + }, + "isDrain": { + "type": "boolean", + "default": false + } + } + }, + "WebWorkloadGroup": { + "description": "Group of servers hosting a part of a Web application represented by a Component.", + "allOf": [ + { + "$ref": "#/components/schemas/WorkloadGroupCommon" + }, + { + "type": "object", + "properties": { + "proxy": { + "$ref": "#/components/schemas/WebProxy" + }, + "sessionPersistence": { + "$ref": "#/components/schemas/SessionPersistence" + }, + "uris": { + "type": "object", + "description": "The URI for a server hosting a part of a Web application.\n\nIt must conform to the format `schema://address[:port]`\nwhere schema is chosen from http or https, address is IP or hostname,\nschema and address must be provided.\n\nFor example:\n\n- `http://192.0.2.247`\n- `https://192.0.2.247:8443`\n- `https://www.f5workload.com`\n", + "additionalProperties": { + "$ref": "#/components/schemas/WorkloadUri" + } + } + } + } + ] + }, + "BackendBuffers": { + "description": "Proxy buffer settings applicable to servers across all Web workloadGroups associated with a Component.", + "type": "object", + "properties": { + "headerSize": { + "description": "Sets the size of the buffer used for reading the first part of the response received from the proxied server.", + "type": "string", + 
"pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size" + } + }, + "isEnabled": { + "description": "Enables or disables buffering of responses from the proxied server.", + "type": "boolean", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering" + } + }, + "size": { + "description": "Sets the number and size of the buffers used for reading a response from the proxied server, for a single connection.", + "type": "object", + "properties": { + "number": { + "type": "integer" + }, + "size": { + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$" + } + }, + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffers" + } + }, + "busySize": { + "description": "When buffering of responses from the proxied server is enabled, it limits the total size of buffers that can be busy sending a response to the client while the response is not yet fully read.", + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_busy_buffers_size" + } + }, + "ignoreClientAbort": { + "description": "Determines whether the connection with a proxied server should be closed when a client closes the connection without waiting for a response.", + "type": "string", + "enum": [ + "ENABLED", + "DISABLED" + ], + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_client_abort" + } + }, + "forceRanges": { + "description": "Enables byte-range support for both cached and uncached responses from the proxied server regardless of the \"Accept-Ranges\" field in these responses.", + "type": "string", + "enum": [ + "ENABLED", + "DISABLED" + ], + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_force_ranges" + } + }, + "httpVersion": { + "description": "Sets 
the HTTP protocol version for proxying.", + "type": "string", + "enum": [ + "1.0", + "1.1" + ], + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version" + } + }, + "rate": { + "description": "Limits the speed (in bytes per second) of reading the response from the proxied server.\n\nDisables rate limiting when set to 0. \n", + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_limit_rate" + } + }, + "readTimeout": { + "description": "Defines a timeout for reading a response from the proxied server.", + "type": "string", + "pattern": "^[0-9]{1,12}[hms]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout" + } + }, + "maxTempFileSize": { + "description": "Sets the maximum size of the temporary file that the response can be saved into. Note that this applies only when buffering of responses from the proxied server is enabled and the response does not fit into the buffers.\n\nDisables temporary file usage when set to 0.\n", + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_max_temp_file_size" + } + }, + "tempFileWriteSize": { + "description": "Limits the size of data written to a temporary file at a time, when buffering of responses from the proxied server to temporary files is enabled.\n\nDisables temporary file usage when set to 0.\n", + "type": "string", + "pattern": "^[0-9]{1,12}[kKmM]?$", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_temp_file_write_size" + } + } + } + }, + "WebBackend": { + "description": "Backend settings in a Web Component.\n", + "type": "object", + "properties": { + "keepAlive": { + "type": "object", + "properties": { + "connections": { + "type": "integer", + "minimum": 0, + "default": 0 + }, + "requestsPerConn": { + 
"type": "integer" + }, + "idleTimeout": { + "type": "string", + "pattern": "^[0-9]+[hms]$", + "example": "1h" + } + } + }, + "monitoring": { + "$ref": "#/components/schemas/WebMonitoring" + }, + "workloadGroups": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/WebWorkloadGroup" + } + }, + "preserveHostHeader": { + "x-f5-experimental": true, + "$ref": "#/components/schemas/ServiceConfigState" + }, + "queue": { + "x-f5-experimental": true, + "type": "object", + "required": [ + "length" + ], + "properties": { + "length": { + "type": "integer" + }, + "timeOut": { + "type": "string", + "pattern": "^[0-9]+[hms]$", + "example": "1h" + } + } + }, + "httpVersion": { + "x-f5-experimental": true, + "type": "string", + "pattern": "^[1-3]{1}\\.[0-1]{1}$", + "example": "1.0" + }, + "ntlmAuthentication": { + "x-f5-experimental": true, + "$ref": "#/components/schemas/ServiceConfigState" + }, + "persistentState": { + "x-f5-experimental": true, + "type": "string" + }, + "buffers": { + "$ref": "#/components/schemas/BackendBuffers" + }, + "isSocketKeepaliveEnabled": { + "description": "Configures the “TCP keepalive” behavior for outgoing connections to a proxied server.", + "type": "boolean", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_socket_keepalive" + } + }, + "ignoreHeaders": { + "description": "Disables processing of certain response header fields from the proxied server.", + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "X-Accel-Redirect", + "X-Accel-Expires", + "X-Accel-Limit-Rate", + "X-Accel-Buffering", + "X-Accel-Charset", + "Expires", + "Cache-Control", + "Set-Cookie", + "Vary" + ], + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers" + } + }, + "debugHeaders": { + "description": "Permits passing otherwise disabled header fields from a proxied server to a client.", + "type": "array", + "items": { + 
"type": "string" + }, + "example": [ + "X-Accel-Redirect", + "X-Accel-Expires", + "X-Accel-Limit-Rate", + "X-Accel-Buffering", + "X-Accel-Charset", + "Expires", + "Cache-Control", + "Set-Cookie", + "Vary" + ], + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass_header" + } + }, + "tls": { + "description": "TLS settings applicable to servers in Web workloadGroups.", + "type": "object", + "properties": { + "cipher": { + "description": "Specifies the enabled ciphers for requests to a proxied HTTPS server.", + "type": "string", + "example": "ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP;", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_ciphers" + } + }, + "name": { + "description": "Allows overriding the server name used to verify the certificate of the proxied HTTPS server.", + "type": "string", + "example": "$proxy_host", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_name" + } + }, + "protocols": { + "description": "Enables the specified protocols for requests to a proxied HTTPS server.", + "type": "array", + "items": { + "type": "string", + "pattern": "TLSv1|TLSv1\\.[1-3]|SSLv2|SSLv3" + }, + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_protocols" + } + }, + "isServerNameEnabled": { + "description": "Enables or disables passing of the server name through TLS Server Name Indication extension (SNI, RFC 6066) when establishing a connection with the proxied HTTPS server.", + "type": "boolean", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_server_name" + } + }, + "isSessionReuseEnabled": { + "description": "Determines whether SSL sessions can be reused when working with the proxied server.", + "type": "boolean", + "externalDocs": { + "url": 
"https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_session_reuse" + } + }, + "isVerifiedEnabled": { + "description": "Enables or disables verification of the proxied HTTPS server certificate.", + "type": "boolean", + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_verify" + } + }, + "verifyDepth": { + "description": "Sets the verification depth in the proxied HTTPS server certificates chain.", + "type": "integer", + "minimum": 0, + "externalDocs": { + "url": "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_verify_depth" + } + } + } + } + } + }, + "ApplicableUri": { + "type": "object", + "required": [ + "uri" + ], + "properties": { + "uri": { + "type": "string" + }, + "matchMethod": { + "$ref": "#/components/schemas/URIMatchMethod" + } + } + }, + "ApplicableUris": { + "type": "array", + "description": "Defines an array of ingress URIs with a corresponding matchMethod that this rewrite rule applies to.\nBy default, a rewrite rule applies to all of the ingress URIs in the component.\nA rewrite rule can also be applied at a gateway level by specifying ingress URIs\nfrom the gateway. For example \"http://www.nginx.com\". Note that applying the rewrite\nrule to URIs at a gateway level can affect other components. The URI and the matchMethod must\nmatch an ingress URI defined either at the component or gateway level. If no match is found\nthe request is rejected.\n", + "items": { + "$ref": "#/components/schemas/ApplicableUri" + } + }, + "UriRewrite": { + "type": "object", + "required": [ + "incomingPattern", + "rewritePattern" + ], + "properties": { + "applicableURIs": { + "$ref": "#/components/schemas/ApplicableUris" + }, + "incomingPattern": { + "type": "string", + "description": "The regex pattern to match against the request URIs that are expected to be rewritten." 
+ }, + "rewritePattern": { + "type": "string", + "description": "The replacement regex pattern to apply to the URIs that are to be rewritten.", + "minLength": 1 + }, + "afterExecute": { + "type": "string", + "enum": [ + "NONE", + "LAST", + "BREAK", + "REDIRECT", + "PERMANENT" + ], + "default": "BREAK" + } + } + }, + "UriRedirect": { + "type": "object", + "required": [ + "responseCode" + ], + "properties": { + "applicableURIs": { + "$ref": "#/components/schemas/ApplicableUris" + }, + "responseCode": { + "type": "integer", + "minimum": 300, + "maximum": 308 + }, + "url": { + "type": "string", + "minLength": 1, + "description": "The return url to use for responses in the 301-308 range." + }, + "text": { + "type": "string", + "description": "The return text to send for 300 responses." + } + } + }, + "ProgrammabilityAction": { + "type": "string", + "enum": [ + "ADD", + "MODIFY", + "DELETE" + ] + }, + "CookieModification": { + "type": "object", + "required": [ + "action", + "cookieName" + ], + "properties": { + "action": { + "$ref": "#/components/schemas/ProgrammabilityAction" + }, + "cookieName": { + "type": "string" + }, + "cookieValue": { + "type": "string" + } + } + }, + "ProgrammabilityAddRequestHeader": { + "type": "object", + "required": [ + "action", + "headerName", + "headerValue" + ], + "properties": { + "action": { + "type": "string", + "description": "Adds request header details.", + "enum": [ + "ADD" + ] + }, + "headerName": { + "type": "string", + "description": "The name of the response header to modify.", + "minLength": 1 + }, + "headerValue": { + "type": "string", + "description": "The value to apply to the request header.", + "minLength": 1 + }, + "applicableURIs": { + "$ref": "#/components/schemas/ApplicableUris" + } + } + }, + "ProgrammabilityDeleteRequestHeader": { + "type": "object", + "required": [ + "action", + "headerName" + ], + "properties": { + "action": { + "type": "string", + "description": "Deletes request header details.", + "enum": [ + 
"DELETE" + ] + }, + "headerName": { + "type": "string", + "description": "The name of the request header to modify.", + "minLength": 1 + }, + "applicableURIs": { + "$ref": "#/components/schemas/ApplicableUris" + } + } + }, + "RequestHeaderModification": { + "discriminator": { + "propertyName": "action", + "mapping": { + "ADD": "#/components/schemas/ProgrammabilityAddRequestHeader", + "DELETE": "#/components/schemas/ProgrammabilityDeleteRequestHeader" + } + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ProgrammabilityAddRequestHeader" + }, + { + "$ref": "#/components/schemas/ProgrammabilityDeleteRequestHeader" + } + ] + }, + "ProgrammabilityAddResponseHeader": { + "type": "object", + "required": [ + "action", + "headerName", + "headerValue" + ], + "properties": { + "action": { + "type": "string", + "description": "Adds response header details.", + "enum": [ + "ADD" + ] + }, + "headerName": { + "type": "string", + "description": "The name of the response header to modify.", + "minLength": 1 + }, + "headerValue": { + "type": "string", + "description": "The value to apply to the response header.", + "minLength": 1 + }, + "responseCodeFilter": { + "type": "string", + "description": "The value to apply to the response code filter.", + "enum": [ + "ALWAYS", + "PRE_DEFINED" + ], + "default": "PRE_DEFINED" + }, + "applicableURIs": { + "$ref": "#/components/schemas/ApplicableUris" + } + } + }, + "ProgrammabilityDeleteResponseHeader": { + "type": "object", + "required": [ + "action", + "headerName" + ], + "properties": { + "action": { + "type": "string", + "description": "Deletes response header details.", + "enum": [ + "DELETE" + ] + }, + "headerName": { + "type": "string", + "description": "The name of the response header to modify.\n\n> Warning: `Date`, `Content-Length`, and `Connection` headers cannot be deleted.\n", + "minLength": 1 + }, + "applicableURIs": { + "$ref": "#/components/schemas/ApplicableUris" + } + } + }, + "ResponseHeaderModification": { + 
"discriminator": { + "propertyName": "action", + "mapping": { + "ADD": "#/components/schemas/ProgrammabilityAddResponseHeader", + "DELETE": "#/components/schemas/ProgrammabilityDeleteResponseHeader" + } + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ProgrammabilityAddResponseHeader" + }, + { + "$ref": "#/components/schemas/ProgrammabilityDeleteResponseHeader" + } + ] + }, + "Programmability": { + "type": "object", + "properties": { + "uriRewrites": { + "type": "array", + "items": { + "$ref": "#/components/schemas/UriRewrite" + } + }, + "httpHttpsRedirect": { + "x-f5-experimental": true, + "$ref": "#/components/schemas/ServiceConfigState" + }, + "uriRedirects": { + "x-f5-experimental": true, + "type": "array", + "items": { + "$ref": "#/components/schemas/UriRedirect" + } + }, + "cookieModifications": { + "x-f5-experimental": true, + "type": "array", + "items": { + "$ref": "#/components/schemas/CookieModification" + } + }, + "requestHeaderModifications": { + "type": "array", + "items": { + "$ref": "#/components/schemas/RequestHeaderModification" + } + }, + "responseHeaderModifications": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResponseHeaderModification" + } + } + } + }, + "Logging": { + "description": "Settings for error logs and access logs.", + "type": "object", + "properties": { + "errorLog": { + "$ref": "#/components/schemas/ServiceConfigState" + }, + "accessLog": { + "type": "object", + "properties": { + "state": { + "$ref": "#/components/schemas/ServiceConfigState" + }, + "format": { + "type": "string" + } + } + } + } + }, + "WAF": { + "description": "Defines the desired configurations for the WAF associated with the application component.", + "properties": { + "isEnabled": { + "type": "boolean", + "default": true, + "description": "Indicates whether the WAF is enabled or not." + }, + "isMonitorOnly": { + "type": "boolean", + "default": true, + "description": "Indicates whether the WAF will monitor or block security violations." 
+ }, + "signatureOverrides": { + "type": "object", + "additionalProperties": { + "type": "object", + "properties": { + "action": { + "type": "string", + "enum": [ + "IGNORE" + ] + } + }, + "description": "Identifies overrides for the signatures contained within the associated security strategy.\n", + "example": { + "1234": { + "action": "IGNORE" + }, + "1235": { + "action": "IGNORE" + } + } + } + } + } + }, + "APIKeyClientAuth": { + "description": "Defines how an API client should provide their API Key credentials.", + "required": [ + "keyLocation" + ], + "properties": { + "keyLocation": { + "type": "string", + "enum": [ + "HEADER", + "QUERY_PARAM" + ] + }, + "key": { + "type": "string" + } + } + }, + "JWTClientAuth": { + "description": "Defines how an API Client should provide their JWT.", + "required": [ + "keyLocation" + ], + "properties": { + "keyLocation": { + "type": "string", + "enum": [ + "BEARER", + "HEADER", + "QUERY_PARAM", + "COOKIE" + ] + }, + "key": { + "type": "string" + } + } + }, + "ConditionalAuthPolicy": { + "description": "Defines further fine-grained access control on top of API Key or JWT Auth.", + "required": [ + "sourceType", + "comparisonType", + "comparisonValues", + "action", + "denyStatusCode" + ], + "properties": { + "sourceType": { + "type": "string", + "enum": [ + "HEADER", + "JWT_CLAIM" + ] + }, + "sourceKey": { + "type": "string" + }, + "comparisonType": { + "type": "string", + "enum": [ + "EQUALS", + "NOT_EQUALS", + "IN", + "CONTAINS" + ] + }, + "comparisonValues": { + "type": "array", + "description": "Valid values for the sourceType.", + "items": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ] + } + }, + "action": { + "type": "string", + "enum": [ + "ALLOW", + "DENY" + ] + }, + "denyStatusCode": { + "type": "integer" + } + } + }, + "RateLimit": { + "required": [ + "rate" + ], + "properties": { + "rate": { + "type": "string", + "pattern": "^[0-9]+r\\/[m|s]{1}$", + "description": "Sets the maximum number 
of allowed requests.\n\nYou can specify the rate limit as requests per second (r/s) or requests per minute (r/m).\n", + "example": "10r/s" + }, + "burstBeforeReject": { + "type": "integer", + "minimum": 0 + }, + "burstBeforeDelay": { + "type": "integer", + "minimum": 0 + }, + "statusCode": { + "type": "integer", + "default": 429 + }, + "key": { + "type": "string", + "default": "$binary_remote_addr", + "description": "Parameters (NGINX variable) for a shared memory zone that stores states for various keys; used for `limit_req_zone`." + } + } + }, + "Security": { + "type": "object", + "description": "Defines the desired security configurations for the application component.", + "properties": { + "strategyRef": { + "$ref": "#/components/schemas/ResourceRef" + }, + "waf": { + "$ref": "#/components/schemas/WAF" + }, + "identityProviderRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + }, + "description": "The list of Identity Providers that are used in this Security policy." 
+ }, + "apiKeyClientAuth": { + "$ref": "#/components/schemas/APIKeyClientAuth" + }, + "jwtClientAuth": { + "$ref": "#/components/schemas/JWTClientAuth" + }, + "conditionalAuthPolicies": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/ConditionalAuthPolicy" + } + }, + "rateLimits": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/RateLimit" + } + }, + "interceptWorkloadErrors": { + "x-f5-experimental": true, + "$ref": "#/components/schemas/ServiceConfigState" + } + } + }, + "PublishedApiRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "ComponentWebDesiredState": { + "description": "The desired settings in the Web Component that the user wants in the configuration on NGINX instances associated with\nthe Gateways which this component references.\n", + "allOf": [ + { + "$ref": "#/components/schemas/ComponentStateCommon" + }, + { + "type": "object", + "required": [ + "ingress", + "backend" + ], + "properties": { + "componentType": { + "description": "Defines the Component type. 
The default type is Web.", + "type": "string", + "enum": [ + "WEB" + ] + }, + "ingress": { + "$ref": "#/components/schemas/ComponentWebIngress" + }, + "backend": { + "$ref": "#/components/schemas/WebBackend" + }, + "programmability": { + "$ref": "#/components/schemas/Programmability" + }, + "logging": { + "$ref": "#/components/schemas/Logging" + }, + "security": { + "$ref": "#/components/schemas/Security" + }, + "publishedApiRefs": { + "$ref": "#/components/schemas/PublishedApiRefs" + }, + "errorSetRef": { + "$ref": "#/components/schemas/ResourceRef" + }, + "compression": { + "$ref": "#/components/schemas/Compression" + } + } + } + ] + }, + "ComponentTcpUdpIngress": { + "description": "Ingress settings in a TCP/UDP Component.", + "allOf": [ + { + "$ref": "#/components/schemas/ComponentIngressCommon" + } + ] + }, + "TcpUdpMonitoring": { + "description": "Health monitor settings across all workload groups in a TCP/UDP Component.", + "allOf": [ + { + "$ref": "#/components/schemas/MonitoringCommon" + }, + { + "type": "object", + "properties": { + "send": { + "description": "Probe request for a TCP/UDP monitor.", + "type": "string" + }, + "response": { + "description": "Case-sensitive regular expression for the expected success response to a TCP/UDP monitor.", + "type": "string" + } + } + } + ] + }, + "TcpUdpProxy": { + "description": "Proxy retry and timeout settings applicable to servers in a TcpUdp workloadGroup associated with a Component.", + "type": "object", + "properties": { + "nextUpstream": { + "description": "When a connection to the proxied server cannot be established, determines whether a client connection will be passed to the next server.", + "type": "string", + "enum": [ + "ON", + "OFF" + ], + "default": "OFF" + }, + "connectTimeout": { + "description": "Defines a timeout for establishing a connection with the proxied server.", + "type": "string", + "pattern": "^[0-9]{1,12}[hms]?$" + }, + "proxyTimeout": { + "description": "Sets the timeout between two 
successive read or write operations on client or proxied server connections.", + "type": "string", + "pattern": "^[0-9]{1,12}[hms]?$" + } + } + }, + "TcpUdpWorkloadGroup": { + "description": "Group of servers hosting a part of a TCP/UDP application represented by a Component.", + "allOf": [ + { + "$ref": "#/components/schemas/WorkloadGroupCommon" + }, + { + "type": "object", + "properties": { + "proxy": { + "$ref": "#/components/schemas/TcpUdpProxy" + }, + "uris": { + "type": "object", + "description": "The URI for a server hosting a part of a TCP/UDP application.\n\nThe URI must conform to the format `schema://address:port`\nwhere schema is chosen from tcp, udp, or tcp+tls, address is IP or hostname.\nAll three of schema, address, and port must be provided.\n\nFor example:\n\n- `tcp://192.0.2.247:8443`\n- `tcp+tls://192.0.2.247:8449`\n- `udp://www.f5workload.com:989`\n", + "additionalProperties": { + "$ref": "#/components/schemas/WorkloadUri" + } + } + } + } + ] + }, + "TcpUdpBackend": { + "description": "Backend settings in a TCP/UDP Component.\n", + "type": "object", + "properties": { + "monitoring": { + "$ref": "#/components/schemas/TcpUdpMonitoring" + }, + "workloadGroups": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/TcpUdpWorkloadGroup" + } + } + } + }, + "ComponentTcpUdpDesiredState": { + "description": "The desired settings in the TCP/UDP Component to use in the configuration on NGINX instances associated with the Gateways that this Component references.", + "allOf": [ + { + "$ref": "#/components/schemas/ComponentStateCommon" + }, + { + "type": "object", + "required": [ + "componentType", + "ingress", + "backend" + ], + "properties": { + "componentType": { + "description": "Defines what type of Component this is. 
The type must be TCPUDP.", + "type": "string", + "enum": [ + "TCPUDP" + ] + }, + "ingress": { + "$ref": "#/components/schemas/ComponentTcpUdpIngress" + }, + "backend": { + "$ref": "#/components/schemas/TcpUdpBackend" + }, + "logging": { + "$ref": "#/components/schemas/Logging" + } + } + } + ] + }, + "ComponentDesiredState": { + "description": "The desired component settings to use in the NGINX instance configuration that's associated with\nthe Gateways that this component references.\n", + "oneOf": [ + { + "$ref": "#/components/schemas/ComponentWebDesiredState" + }, + { + "$ref": "#/components/schemas/ComponentTcpUdpDesiredState" + } + ] + }, + "ComponentWebCurrentStatus": { + "description": "The current snapshot of the web component settings that are reflected in the configuration on NGINX instances associated with\nthe Gateways that this component references.\n\nThese settings should converge to those in Desired State as the new configuration is applied unless there are issues;\nthe State setting gives a summary of how the convergence is proceeding.\n", + "allOf": [ + { + "$ref": "#/components/schemas/ComponentStateCommon" + }, + { + "type": "object", + "properties": { + "componentType": { + "description": "Defines what type of Component this is.", + "type": "string", + "enum": [ + "WEB" + ] + }, + "ingress": { + "$ref": "#/components/schemas/ComponentWebIngress" + }, + "backend": { + "$ref": "#/components/schemas/WebBackend" + }, + "programmability": { + "$ref": "#/components/schemas/Programmability" + }, + "logging": { + "$ref": "#/components/schemas/Logging" + }, + "security": { + "$ref": "#/components/schemas/Security" + }, + "publishedApiRefs": { + "$ref": "#/components/schemas/PublishedApiRefs" + }, + "errorSetRef": { + "$ref": "#/components/schemas/ResourceRef" + }, + "compression": { + "$ref": "#/components/schemas/Compression" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + } + } + } + ] + }, + "ComponentTcpUdpCurrentStatus": { + 
"description": "The current snapshot of the TCP/UDP component settings that are reflected in the configuration on NGINX instances associated with\nthe Gateways that this component references.\n\nThese settings should converge to those in Desired State as the new configuration is applied unless there are issues;\nthe State setting gives a summary of how the convergence is proceeding.\n", + "allOf": [ + { + "$ref": "#/components/schemas/ComponentStateCommon" + }, + { + "type": "object", + "properties": { + "componentType": { + "description": "Defines what type of Component this is.", + "type": "string", + "enum": [ + "TCPUDP" + ] + }, + "ingress": { + "$ref": "#/components/schemas/ComponentTcpUdpIngress" + }, + "backend": { + "$ref": "#/components/schemas/TcpUdpBackend" + }, + "logging": { + "$ref": "#/components/schemas/Logging" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + } + } + } + ] + }, + "ComponentCurrentStatus": { + "description": "The current snapshot of the component settings that are reflected in the configuration on NGINX instances associated with\nthe Gateways that this Component references.\n\nThese settings should converge to those in Desired State as the new configuration is applied unless there are issues;\nthe State setting gives a summary of how the convergence is proceeding.\n", + "oneOf": [ + { + "$ref": "#/components/schemas/ComponentWebCurrentStatus" + }, + { + "$ref": "#/components/schemas/ComponentTcpUdpCurrentStatus" + } + ] + }, + "Component": { + "description": "A component represents the processing –- reverse proxying, rate limiting, security policy enforcement, header rewrites, etc.\n–- of traffic associated with a logical part (for example, microservice) of an application/API. It also defines the subsequent\nload balancing of traffic to workloads implementing that part of the application/API.\n\nA component can be either a web or a TCP/UDP component –- indicated by the component type. 
Web components are used to\nconfigure NGINX functionality associated with HTTP/HTTPS protocols and inherit web and common settings from linked Gateways.\nTCP/UDP components are used to configure NGINX functionality associated with TCP/UDP protocols\nand inherit TCP/UDP and common settings from linked Gateways.\n", + "type": "object", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/ComponentDesiredState" + }, + "currentStatus": { + "$ref": "#/components/schemas/ComponentCurrentStatus" + } + } + }, + "ComponentName": { + "type": "string" + }, + "Location": { + "required": [ + "metadata", + "desiredState" + ], + "type": "object", + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/LocationState" + }, + "currentStatus": { + "$ref": "#/components/schemas/LocationState" + } + } + }, + "GetLocationResponse": { + "allOf": [ + { + "$ref": "#/components/schemas/Location" + } + ] + }, + "ListLocationResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Location" + } + } + } + }, + "LocationState": { + "oneOf": [ + { + "$ref": "#/components/schemas/OtherLocation" + }, + { + "$ref": "#/components/schemas/AWSLocation" + }, + { + "$ref": "#/components/schemas/AzureLocation" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "OTHER_LOCATION": "#/components/schemas/OtherLocation", + "AWS_LOCATION": "#/components/schemas/AWSLocation", + "AZURE_LOCATION": "#/components/schemas/AzureLocation" + } + } + }, + "AWSLocation": { + "type": "object", + "required": [ + "vpcID", + "region", + "integrationRef", + "type" + ], + "properties": { + "type": { + "type": "string", + "description": "The location type. Select the environment where you want to create the location. 
\nTo create a location that’s specific to the AWS cloud environment, select AWS_LOCATION.\n", + "enum": [ + "AWS_LOCATION" + ] + }, + "region": { + "description": "The AWS region.\n", + "type": "string" + }, + "vpcID": { + "type": "string", + "description": "The vpcID of the AWS Virtual Private Cloud (VPC) where new Instances created under this location should reside.\nThe VPC must be in the specified AWS region.\n" + }, + "integrationRef": { + "$ref": "#/components/schemas/ResourceRef" + } + } + }, + "AzureLocation": { + "x-f5-experimental": true, + "type": "object", + "required": [ + "type", + "region", + "resourceGroup", + "subscriptionID", + "integrationRef" + ], + "properties": { + "type": { + "type": "string", + "description": "The location type.", + "enum": [ + "AZURE_LOCATION" + ] + }, + "region": { + "description": "The Azure region.", + "type": "string" + }, + "resourceGroup": { + "type": "string", + "description": "The name of the resourceGroup." + }, + "subscriptionID": { + "type": "string", + "description": "The unique alphanumeric string that identifies the Azure subscription." + }, + "integrationRef": { + "description": "Integration ref.", + "$ref": "#/components/schemas/ResourceRef" + } + } + }, + "OtherLocation": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "description": "The location type. Select the environment where you want to create the location. 
\nTo create a location that’s not specific to any cloud environment, select OTHER_LOCATION.\n", + "enum": [ + "OTHER_LOCATION" + ] + } + } + }, + "InstanceTemplate": { + "type": "object", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/InstanceTemplateState" + }, + "currentStatus": { + "$ref": "#/components/schemas/InstanceTemplateState" + } + } + }, + "GetInstanceTemplateResponse": { + "allOf": [ + { + "$ref": "#/components/schemas/InstanceTemplate" + } + ] + }, + "ListInstanceTemplateResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/InstanceTemplate" + } + } + } + }, + "InstanceTemplateState": { + "oneOf": [ + { + "$ref": "#/components/schemas/AWSInstanceTemplate" + }, + { + "$ref": "#/components/schemas/AzureInstanceTemplate" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "AWS_INSTANCE_TEMPLATE": "#/components/schemas/AWSInstanceTemplate", + "AZURE_INSTANCE_TEMPLATE": "#/components/schemas/AzureInstanceTemplate" + } + } + }, + "AWSInstanceTemplate": { + "type": "object", + "required": [ + "type", + "amiID", + "instanceType", + "subnetID" + ], + "properties": { + "type": { + "type": "string", + "description": "The type of Instance Template.", + "enum": [ + "AWS_INSTANCE_TEMPLATE" + ] + }, + "amiID": { + "type": "string", + "description": "The AWS `amiID` for the image to use when deploying an Instance using the template.\n" + }, + "instanceType": { + "type": "string", + "description": "The machine size.\n" + }, + "subnetID": { + "type": "string", + "description": "The `subnetID` of the AWS subnet where new Instances created using the Instance Template should reside.\n\nThe specified subnet must be in the same AWS Virtual Private Cloud (VPC) as the Instance Template's parent Location resource.\n" + }, + "securityGroupIDs": 
{ + "type": "array", + "description": "The list of AWS securityGroupIDs that you want to apply to new Instances. \n\nThe Security GroupIDs must be available in the same AWS region and Virtual Private Cloud (VPC) as the Instance Template's parent Location resource.\n", + "items": { + "type": "string" + } + }, + "publicKey": { + "type": "string", + "description": "Provide the public key that you want to use to authenticate to the EC2 instance that this template will create. \n" + }, + "associatePublicIPAddress": { + "type": "boolean", + "description": "Specify if a public IP address should be assigned to the instance.\n" + } + } + }, + "AzureInstanceTemplate": { + "x-f5-experimental": true, + "type": "object", + "required": [ + "type", + "instanceType", + "image", + "networkInterface", + "adminUser", + "publicKey" + ], + "properties": { + "type": { + "type": "string", + "description": "The type of Instance Template.", + "enum": [ + "AZURE_INSTANCE_TEMPLATE" + ] + }, + "image": { + "$ref": "#/components/schemas/AzureImage" + }, + "instanceType": { + "type": "string", + "description": "The virtual machine size and type." + }, + "networkInterface": { + "$ref": "#/components/schemas/AzureNetworkInterface" + }, + "adminUser": { + "type": "string", + "description": "The name of the administration account." + }, + "publicKey": { + "type": "string", + "description": "The Public Key string for the adminUser." 
+ } + } + }, + "AzureImage": { + "type": "object", + "oneOf": [ + { + "$ref": "#/components/schemas/AzureImageID" + }, + { + "$ref": "#/components/schemas/AzureImageReference" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "AZURE_IMAGE_ID": "#/components/schemas/AzureImageID", + "AZURE_IMAGE_REFERENCE": "#/components/schemas/AzureImageReference" + } + } + }, + "AzureImageID": { + "type": "object", + "description": "The Azure resource ID for the image to use when deploying an Instance.", + "required": [ + "type", + "imageID" + ], + "properties": { + "type": { + "type": "string", + "description": "The type of the Azure Image.", + "enum": [ + "AZURE_IMAGE_ID" + ] + }, + "imageID": { + "type": "string", + "description": "The resource ID of the Azure image." + } + } + }, + "AzureImageReference": { + "type": "object", + "description": "The parameters that identify which Azure Marketplace image to use for the Instance.", + "required": [ + "type", + "publisher", + "offer", + "sku" + ], + "properties": { + "type": { + "type": "string", + "description": "The type of the Azure Image.", + "enum": [ + "AZURE_IMAGE_REFERENCE" + ] + }, + "publisher": { + "type": "string", + "description": "The publisher of the Azure Marketplace image." + }, + "offer": { + "type": "string", + "description": "The offer of the Azure Marketplace image." + }, + "sku": { + "type": "string", + "description": "The SKU of the Azure Marketplace image." + }, + "version": { + "type": "string", + "description": "The version of the Azure Marketplace image (default is latest)." 
+ } + } + }, + "AzureNetworkID": { + "type": "object", + "description": "Identifies the existing Azure Network Interface that you want the Instance to use.", + "required": [ + "type", + "nicID" + ], + "properties": { + "type": { + "type": "string", + "description": "The type of the Azure Network Interface.", + "enum": [ + "AZURE_NIC_ID" + ] + }, + "nicID": { + "type": "string", + "description": "The ID of the Azure Network Interface." + } + } + }, + "AppList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/App" + } + } + } + }, + "App": { + "type": "object", + "description": "An App is a collection of Components.", + "required": [ + "metadata" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "x-f5-experimental": true, + "type": "object" + }, + "currentStatus": { + "$ref": "#/components/schemas/AppCurrentStatus" + } + } + }, + "AppCurrentStatus": { + "type": "object", + "properties": { + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "componentRefs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + } + }, + "Integration": { + "x-f5-experimental": true, + "type": "object", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/IntegrationState" + }, + "currentStatus": { + "$ref": "#/components/schemas/IntegrationState" + } + }, + "example": { + "metadata": { + "name": "my-aws-integration", + "endpointUri": "https://ec2.us-west-1.amazonaws.com", + "description": "AWS integration for us-west-1", + "displayName": "UsWest1-Integration", + "tags": [ + "us-west-1" + ], + "uid": "619887de-a748-4931-853d-c6b706f95ddf", + "createTime": "2019-09-18T16:42:15.1183523Z", + "updateTime": "2019-09-18T16:42:15.1183523Z" + }, + "desiredState": { + "type": 
"AWS_INTEGRATION", + "credential": { + "type": "AWS_ACCESS_KEY_CREDENTIAL", + "accessKeyID": "AKIAJ3N2PZUMZE3Y67ZA", + "secretAccessKey": "*********" + } + }, + "currentStatus": { + "type": "AWS_INTEGRATION", + "credential": { + "type": "AWS_ACCESS_KEY_CREDENTIAL", + "accessKeyID": "AKIAJ3N2PZUMZE3Y67ZA", + "secretAccessKey": "*********" + } + } + } + }, + "GetIntegrationResponse": { + "allOf": [ + { + "$ref": "#/components/schemas/Integration" + } + ] + }, + "ListIntegrationResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Integration" + } + } + } + }, + "IntegrationState": { + "oneOf": [ + { + "$ref": "#/components/schemas/AWSIntegration" + } + ], + "discriminator": { + "propertyName": "type" + } + }, + "AWSIntegration": { + "required": [ + "type", + "credential" + ], + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "AWS_INTEGRATION" + ] + }, + "endpointUri": { + "type": "string" + }, + "credential": { + "oneOf": [ + { + "$ref": "#/components/schemas/AWSAccessKeyCredential" + } + ], + "discriminator": { + "propertyName": "type" + } + } + } + }, + "AWSAccessKeyCredential": { + "required": [ + "type", + "accessKeyID", + "secretAccessKey" + ], + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "AWS_ACCESS_KEY_CREDENTIAL" + ] + }, + "accessKeyID": { + "type": "string" + }, + "secretAccessKey": { + "type": "string" + } + } + }, + "PolicyDesiredState": { + "type": "object", + "description": "The desired state of the policy", + "required": [ + "content" + ], + "properties": { + "content": { + "type": "object", + "$ref": "#/components/schemas/PolicyData", + "example": "{\"policy\": {\"name\": \"/Common/NAPBlockingDefaultPolicy\", \"template\": {\"name\": \"POLICY_TEMPLATE_NGINX_BASE\"}, \"applicationLanguage\": \"utf-8\", \"enforcementMode\": \"blocking\", \"signatures\": [], \"bot-defense\": {\"settings\": {\"isEnabled\": 
false}}, \"headers\": [{\"name\": \"*\", \"type\": \"wildcard\", \"decodeValueAsBase64\": \"disabled\"}, {\"name\": \"*-bin\", \"type\": \"wildcard\", \"decodeValueAsBase64\": \"required\"},{\"name\": \"Referer\", \"type\": \"explicit\", \"decodeValueAsBase64\": \"disabled\"}, {\"name\": \"Authorization\", \"type\": \"explicit\", \"decodeValueAsBase64\": \"disabled\"}, {\"name\": \"Transfer-Encoding\", \"type\": \"explicit\", \"decodeValueAsBase64\": \"disabled\"}], \"cookies\": [{\"name\": \"*\", \"type\": \"wildcard\", \"decodeValueAsBase64\": \"disabled\"}], \"parameters\": [{\"name\": \"*\", \"type\": \"wildcard\", \"decodeValueAsBase64\": \"disabled\"}]}}" + } + } + }, + "PolicyList": { + "type": "object", + "required": [ + "items" + ], + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/PolicyStatus" + } + } + } + }, + "PolicyStatus": { + "type": "object", + "required": [ + "metadata", + "desiredState", + "currentStatus" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/PolicyDesiredState" + }, + "currentStatus": { + "$ref": "#/components/schemas/PolicyCurrentStatus" + } + } + }, + "PolicyCurrentStatus": { + "type": "object", + "description": "Shows the current status of the policy.", + "required": [ + "state", + "content" + ], + "properties": { + "content": { + "type": "object", + "$ref": "#/components/schemas/PolicyData" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + } + } + }, + "Policy": { + "type": "object", + "description": "Contains the policy to upload.", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/PolicyDesiredState" + } + } + }, + "PolicyData": { + "type": "object", + "description": "Contains the policy to upload." 
+ }, + "Gateway": { + "type": "object", + "description": "A Gateway represents the initial network entry point of application and/or API traffic into an NGINX instance that is\nin the data path of this traffic. Gateway settings are combined with Component settings that reference the Gateway;\nthe resulting composite config is sent to the NGINX instances that the Gateway references.\n\nA Gateway can be referenced by either web components and/or TCP/UDP components. Web-only settings in the Gateway (for example, web URIs)\napply only to web components. TCP/UDP-only settings in the Gateway (for example, TCP/UDP URIs) apply only to TCP/UDP components.\nWeb and TCP/UDP common settings in the Gateway (for example, global TLS, socket) apply to both web and TCP/UDP components.\n\nExamples:\nGateway with web and TCP/UDP URIs, web-specific settings, common settings. Web URIs and web settings apply to web components.\nCommon settings apply to both component types. TCP/UDP URIs apply only to TCP/UDP components –- presently, there are no other TCP/UDP only settings.\n\nGateway web URIs (for example, https://www.xyz.com) combined with web component URIs (for example, /blog) define web URI config (https://www.xyz.com/blog).\nComponent TCP/UDP URIs define TCP/UDP URI config (for example, tcp+tls://192.168.1.1:200); Gateway TCP/UDP URIs can provide TLS info plus restrict\nwhich TCP/UDP URIs can be in the component. 
Alternatively, component URIs fully define the URI config if the Gateway has no URIs.\n\nIf only a single URI type is in the Gateway, the URI config for that type is determined by combining the Gateway URIs with\nthe URIs from components of that type; only component URIs are used for the URI config for the other type.\n\nFor HTTPS URIs, global Gateway TLS settings are used when more specific TLS info is not present in the Gateway URIs,\nor for component URIs that have an HTTPS protocol and hostnames with no specific URI or component global TLS settings defined.\n\nFor tcp+tls URIs, Gateway TLS settings are used when TLS info is not defined in a component URI or component global TLS.\nA Gateway URI’s TLS info is used if it encompasses the component URI. For example, tcp+tls://192.168.1.5:100-104 in the Gateway and\ncomponent URI of tcp+tls://192.168.1.5:100. Global Gateway TLS is used if no other TLS settings apply.\n", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/GatewayDesiredState" + }, + "currentStatus": { + "$ref": "#/components/schemas/GatewayCurrentStatus" + } + } + }, + "GatewayCurrentStatus": { + "description": "The current snapshot of the gateway settings that are reflected in the configuration on NGINX instances referenced by this Gateway.\nThese settings should converge to those in Desired State as the new configuration is applied unless there are issues;\nthe State setting gives a summary of how the convergence is proceeding.\n", + "allOf": [ + { + "$ref": "#/components/schemas/GatewayWebState" + }, + { + "$ref": "#/components/schemas/GatewayStateCommon" + }, + { + "type": "object", + "properties": { + "ingress": { + "$ref": "#/components/schemas/GatewayIngress" + }, + "state": { + "$ref": "#/components/schemas/ConfigState" + } + } + } + ] + } + }, + "responses": { + "BadRequest": { + "description": "Bad input 
parameter, or possibly a bad URI. Check the input for typos and try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "Unauthorized": { + "description": "User authentication is invalid or missing.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "NotFound": { + "description": "The specified instance group resource was not found.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "Conflict": { + "description": "The request failed due to a conflict with an existing instance group resource.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "NotAllowed": { + "description": "The request is not allowed on the specified resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "code": 120322, + "message": "Error deleting the location: the name 'unspecified' is reserved. Specify a different name for the location, then try again." 
+ } + } + } + } + }, + "examples": { + "AWSInstanceRequest": { + "value": { + "metadata": { + "name": "nginx-dp-1", + "displayName": "nginx-dp-1", + "tags": [ + "prod-1", + "dev-1" + ] + }, + "desiredState": { + "type": "AWS_INSTANCE", + "templateRef": { + "ref": "/infrastructure/locations/aws-east/instance-templates/small-dev-template" + } + } + } + }, + "AzureInstanceRequest": { + "value": { + "metadata": { + "name": "nginx-dp-1", + "displayName": "nginx-dp-1", + "tags": [ + "prod-1", + "dev-1" + ] + }, + "desiredState": { + "type": "AZURE_INSTANCE", + "templateRef": { + "ref": "/infrastructure/locations/azure-westus2/instance-templates/small-dev-template" + } + } + } + }, + "InstanceUpdateRequest": { + "value": { + "metadata": { + "name": "nginx-dp-1", + "displayName": "nginx-dp-1", + "description": "An example NGINX Instance.", + "tags": [ + "prod-1", + "dev-1" + ] + } + } + }, + "AWSInstance": { + "value": { + "metadata": { + "name": "instance-1", + "displayName": "My Instance", + "tags": [ + "prod-1", + "dev-1" + ], + "links": { + "rel": "/infrastructure/locations/unspecified/instances/instance-1" + }, + "createTime": "2019-07-29T09:12:33.001Z", + "updateTime": "2019-07-29T09:12:33.001Z" + }, + "desiredState": { + "type": "AWS_INSTANCE", + "templateRef": { + "ref": "/infrastructure/locations/aws-uswest-2/instance-templates/my-t2-medium" + } + }, + "currentStatus": { + "type": "AWS_INSTANCE", + "state": { + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + }, + "hostname": "instance-1.mycloud.net", + "version": "1.17.3", + "muted": false, + "networkConfig": { + "networkInterfaces": [ + { + "name": "eth0", + "privateDnsName": "ip-172-16-0-71.us-west-2.compute.internal", + "publicDnsName": "ec2-54-212-110-173.us-west-2.compute.amazonaws.com", + "privateIP": "172.16.0.71", + "publicIP": "54.212.110.173", + 
"subnet": { + "subnetID": "subnet-055d28be58feb0a7d", + "cidrIPv4": "172.16.0.0/24" + } + } + ] + }, + "agent": { + "credentials": { + "hostname": "instance-1.mycloud.net", + "uuid": "c1088edfd9f35cd38d5b4ce109508fe9" + }, + "version": 2.8 + }, + "legacyNginxMetadata": { + "build": "nginx-plus-r19", + "built_from_source": false, + "last_seen": "2019-11-12T23:47:52.966Z", + "pid": 2749, + "properties": { + "conf-path": "/etc/nginx/nginx.conf", + "error-log-path": "/var/log/nginx/error.log", + "group": "nginx", + "http-client-body-temp-path": "/var/cache/nginx/client_temp", + "http-fastcgi-temp-path": "/var/cache/nginx/fastcgi_temp", + "http-log-path": "/var/log/nginx/access.log", + "http-proxy-temp-path": "/var/cache/nginx/proxy_temp", + "http-scgi-temp-path": "/var/cache/nginx/scgi_temp", + "http-uwsgi-temp-path": "/var/cache/nginx/uwsgi_temp", + "loadable_modules": [], + "lock-path": "/var/run/nginx.lock", + "modules-path": "/usr/lib/nginx/modules", + "packages": { + "nginx-plus": "19-1~bionic" + }, + "path": { + "bin": "/usr/sbin/nginx", + "conf": "/etc/nginx/nginx.conf" + }, + "pid-path": "/var/run/nginx.pid", + "prefix": "/etc/nginx", + "sbin-path": "/usr/sbin/nginx", + "status_module_enabled": false, + "stub_status_enabled": false, + "stub_status_url": null, + "user": "nginx", + "with-cc-opt": "-g -O2 -fdebug-prefix-map=/data/builder/debuild/nginx-plus-1.17.3/debian/debuild-base/nginx-plus-1.17.3=. 
-fstack-protector-strong -Wformat -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -fPIC", + "with-compat": true, + "with-file-aio": true, + "with-http_addition_module": true, + "with-http_auth_jwt_module": true, + "with-http_auth_request_module": true, + "with-http_dav_module": true, + "with-http_f4f_module": true, + "with-http_flv_module": true, + "with-http_gunzip_module": true, + "with-http_gzip_static_module": true, + "with-http_hls_module": true, + "with-http_mp4_module": true, + "with-http_random_index_module": true, + "with-http_realip_module": true, + "with-http_secure_link_module": true, + "with-http_session_log_module": true, + "with-http_slice_module": true, + "with-http_ssl_module": true, + "with-http_stub_status_module": true, + "with-http_sub_module": true, + "with-http_v2_module": true, + "with-ld-opt": "-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now -Wl,--as-needed -pie", + "with-mail": true, + "with-mail_ssl_module": true, + "with-stream": true, + "with-stream_realip_module": true, + "with-stream_ssl_module": true, + "with-stream_ssl_preread_module": true, + "with-threads": true + }, + "running": true, + "ssl": { + "built": [ + "OpenSSL", + "1.1.1", + "11 Sep 2018" + ], + "run": [ + "OpenSSL", + "1.1.1", + "11 Sep 2018" + ] + }, + "start_time": 1573580604000 + }, + "legacySystemMetadata": { + "boot": 1573580280000, + "disk_partitions": [ + { + "device": "/dev/sda2", + "fstype": "ext4", + "mountpoint": "/" + }, + { + "device": "/dev/sda1", + "fstype": "ext3", + "mountpoint": "/boot" + } + ], + "network": { + "default": "lo", + "interfaces": { + "ipv4": { + "address": "127.0.0.1", + "netmask": "255.0.0.0", + "prefixlen": 8 + }, + "ipv6": { + "address": "::1", + "netmask": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + "prefixlen": 128 + }, + "mac": "00:00:00:00:00:00", + "name": "lo" + } + }, + "os-type": "linux", + "processor": { + "architecture": "x86_64", + "cache": { + "L1d": "32K", + "L1i": "32K", + "L2": "1024K", + "L3": "25344K" + }, + 
"cores": 1, + "cpus": 4, + "hypervisor": "VMware", + "mhz": 2300, + "model": "Intel(R) Xeon(R) Gold 6140 CPU @ 2.30GHz", + "virtualization": "full" + }, + "release": { + "codename": "bionic", + "id": "ubuntu", + "name": "Ubuntu", + "version": "18.04.2 LTS (Bionic Beaver)", + "version_id": 18.04 + } + } + } + } + }, + "AzureInstance": { + "value": { + "metadata": { + "name": "instance-1", + "createTime": "2020-10-27T22:14:36.994172Z", + "description": "My Azure Instance", + "displayName": "Instance-1", + "tags": [ + "prod-1", + "dev-1" + ], + "kind": "instance", + "links": { + "rel": "/api/v1/infrastructure/locations/azure-westus2/instances/instance-1" + }, + "ref": "/infrastructure/locations/azure-westus2/instances/instance-1", + "uid": "4ed722ec-1bc0-47a1-9772-87718fa9ddb8", + "updateTime": "2020-10-27T22:14:36.994172Z" + }, + "desiredState": { + "nginx": { + "process": { + "group": "test", + "user": "testuser" + } + }, + "templateRef": { + "ref": "/infrastructure/locations/azure-westus2/instance-templates/azure-standard-ds1v2" + }, + "type": "AZURE_INSTANCE" + }, + "currentStatus": { + "networkConfig": { + "networkInterfaces": [ + { + "name": "my-nic-1", + "publicDnsName": "myapp.westus2.cloudapp.azure.com", + "privateIP": "10.0.1.4", + "publicIP": "52.229.16.198", + "subnet": { + "subnetID": "subnet-test", + "cidrIPv4": "10.0.1.0/24" + } + } + ] + }, + "agent": { + "credentials": { + "hostname": "instance-1.mycloud.net", + "uuid": "c1088edfd9f35cd38d5b4ce109508fe9" + }, + "version": "3.12.5" + }, + "hostname": "instance-1", + "instanceID": "ce266e67-42ec-41a0-b8f4-f4cd0be01828", + "legacyNginxMetadata": { + "build": "nginx-plus-r22", + "built_from_source": true, + "id": 6, + "last_seen": "2020-10-27T22:30:34.376659Z", + "pid": 1138, + "properties": { + "conf-path": "/etc/nginx/nginx.conf", + "error-log-path": "/var/log/nginx/error.log", + "group": "nginx", + "http-client-body-temp-path": "/var/cache/nginx/client_temp", + "http-fastcgi-temp-path": 
"/var/cache/nginx/fastcgi_temp", + "http-log-path": "/var/log/nginx/access.log", + "http-proxy-temp-path": "/var/cache/nginx/proxy_temp", + "http-scgi-temp-path": "/var/cache/nginx/scgi_temp", + "http-uwsgi-temp-path": "/var/cache/nginx/uwsgi_temp", + "loadable_modules": [ + "ngx_http_f5_metrics_module-debug.so", + "ngx_http_f5_metrics_module.so" + ], + "lock-path": "/var/run/nginx.lock", + "modules-path": "/usr/lib/nginx/modules", + "packages": { + "nginx-plus": "nginx-plus-r22" + }, + "path": { + "bin": "/usr/sbin/nginx", + "conf": "/etc/nginx/nginx.conf" + }, + "pid-path": "/var/run/nginx.pid", + "prefix": "/etc/nginx", + "sbin-path": "/usr/sbin/nginx", + "status_module_enabled": false, + "stub_status_enabled": false, + "stub_status_url": "", + "user": "nginx", + "with-compat": true, + "with-file-aio": true, + "with-http_addition_module": true, + "with-http_auth_jwt_module": true, + "with-http_auth_request_module": true, + "with-http_dav_module": true, + "with-http_f4f_module": true, + "with-http_flv_module": true, + "with-http_gunzip_module": true, + "with-http_gzip_static_module": true, + "with-http_hls_module": true, + "with-http_mp4_module": true, + "with-http_random_index_module": true, + "with-http_realip_module": true, + "with-http_secure_link_module": true, + "with-http_session_log_module": true, + "with-http_slice_module": true, + "with-http_ssl_module": true, + "with-http_stub_status_module": true, + "with-http_sub_module": true, + "with-http_v2_module": true, + "with-ld-opt": "'-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now -Wl,--as-needed -pie'", + "with-mail": true, + "with-mail_ssl_module": true, + "with-stream": true, + "with-stream_realip_module": true, + "with-stream_ssl_module": true, + "with-stream_ssl_preread_module": true, + "with-threads": true + }, + "running": true, + "ssl": { + "built": [ + "OpenSSL", + "1.1.1", + " 11 Sep 2018" + ], + "run": [ + "OpenSSL", + "1.1.1", + " 11 Sep 2018" + ] + }, + "start_time": 1603836993, + "version": 
"1.19.0" + }, + "legacySystemMetadata": { + "boot": 1603836911000, + "disk_partitions": [ + { + "device": "/dev/sda1", + "fstype": "ext4", + "mountpoint": "/" + }, + { + "device": "/dev/sda15", + "fstype": "vfat", + "mountpoint": "/boot/efi" + }, + { + "device": "/dev/sdb1", + "fstype": "ext4", + "mountpoint": "/mnt" + } + ], + "id": 5, + "network": { + "default": "eth0", + "interfaces": [ + { + "ipv4": { + "address": "127.0.0.1", + "netmask": "255.0.0.0", + "prefixlen": 8 + }, + "ipv6": { + "address": "::1", + "netmask": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + "prefixlen": 128 + }, + "mac": "00:00:00:00:00:00", + "name": "lo" + }, + { + "ipv4": { + "address": "10.0.1.4", + "netmask": "255.255.255.0", + "prefixlen": 24 + }, + "ipv6": { + "address": "fe80::20d:3aff:fec5:3f80", + "netmask": "ffff:ffff:ffff:ffff::", + "prefixlen": 64 + }, + "mac": "00:0d:3a:c5:3f:80", + "name": "eth0" + } + ] + }, + "os-type": "linux", + "processor": { + "architecture": "x86_64", + "cache": { + "L1d": "32K", + "L1i": "32K", + "L2": "1024K", + "L3": "36608K" + }, + "cores": "1", + "cpus": "1", + "hypervisor": "Microsoft", + "mhz": "2095.191", + "model": "Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz", + "virtualization": "full" + }, + "release": { + "codename": "bionic", + "id": "ubuntu", + "name": "Ubuntu", + "version": "18.04.5 LTS (Bionic Beaver)", + "version_id": "18.04" + } + }, + "muted": false, + "nginx": { + "process": { + "group": "test", + "user": "testuser" + } + }, + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + }, + "templateRef": { + "ref": "/infrastructure/locations/azure-westus2/instance-templates/azure-standard-ds1v2" + }, + "type": "AZURE_INSTANCE", + "version": "1.19.0" + } + } + }, + "OtherInstance": { + "value": { + "metadata": { + "name": "test_instance_1", + 
"displayName": "Test Instance 1", + "links": { + "rel": "/api/v1/infrastructure/locations/unspecified/instances/test_instance_1" + }, + "createTime": "2020-05-20T20:02:18.107875Z", + "updateTime": "2020-05-20T20:02:18.107875Z" + }, + "desiredState": { + "type": "OTHER_INSTANCE" + }, + "currentStatus": { + "type": "OTHER_INSTANCE", + "state": { + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + }, + "status": { + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + }, + "agent": { + "credentials": { + "hostname": "instance-1.mycloud.net", + "uuid": "c1088edfd9f35cd38d5b4ce109508fe9" + }, + "version": "999.0.0-1" + }, + "hostname": "test-fab4edf8-data-1.test", + "legacyNginxMetadata": { + "build": "nginx-plus-r21", + "built_from_source": false, + "id": 2, + "last_seen": "2020-05-20T20:40:21.146894Z", + "pid": 2995, + "properties": { + "conf-path": "/etc/nginx/nginx.conf", + "error-log-path": "/var/log/nginx/error.log", + "group": "nginx", + "http-client-body-temp-path": "/var/cache/nginx/client_temp", + "http-fastcgi-temp-path": "/var/cache/nginx/fastcgi_temp", + "http-log-path": "/var/log/nginx/access.log", + "http-proxy-temp-path": "/var/cache/nginx/proxy_temp", + "http-scgi-temp-path": "/var/cache/nginx/scgi_temp", + "http-uwsgi-temp-path": "/var/cache/nginx/uwsgi_temp", + "loadable_modules": [ + "ngx_http_f5_metrics_module-debug.so", + "ngx_http_f5_metrics_module.so" + ], + "lock-path": "/var/run/nginx.lock", + "modules-path": "/usr/lib/nginx/modules", + "packages": { + "nginx-plus": "21-1~bionic" + }, + "path": { + "bin": "/usr/sbin/nginx", + "conf": "/etc/nginx/nginx.conf" + }, + "pid-path": "/var/run/nginx.pid", + "prefix": "/etc/nginx", + "sbin-path": 
"/usr/sbin/nginx", + "status_module_enabled": false, + "stub_status_enabled": false, + "stub_status_url": null, + "user": "nginx", + "with-cc-opt": "'-g -O2 -fdebug-prefix-map=/data/builder/debuild/nginx-plus-1.17.9/debian/debuild-base/nginx-plus-1.17.9=. -fstack-protector-strong -Wformat -Werror=format-security,-D_FORTIFY_SOURCE=2 -fPIC'", + "with-compat": true, + "with-file-aio": true, + "with-http_addition_module": true, + "with-http_auth_jwt_module": true, + "with-http_auth_request_module": true, + "with-http_dav_module": true, + "with-http_f4f_module": true, + "with-http_flv_module": true, + "with-http_gunzip_module": true, + "with-http_gzip_static_module": true, + "with-http_hls_module": true, + "with-http_mp4_module": true, + "with-http_random_index_module": true, + "with-http_realip_module": true, + "with-http_secure_link_module": true, + "with-http_session_log_module": true, + "with-http_slice_module": true, + "with-http_ssl_module": true, + "with-http_stub_status_module": true, + "with-http_sub_module": true, + "with-http_v2_module": true, + "with-ld-opt": "'-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now -Wl,--as-needed -pie'", + "with-mail": true, + "with-mail_ssl_module": true, + "with-stream": true, + "with-stream_realip_module": true, + "with-stream_ssl_module": true, + "with-stream_ssl_preread_module": true, + "with-threads": true + }, + "running": true, + "ssl": { + "built": [ + "OpenSSL", + "1.1.1", + "11 Sep 2018" + ], + "run": [ + "OpenSSL", + "1.1.1", + "11 Sep 2018" + ] + }, + "start_time": 1590004864000 + }, + "legacySystemMetadata": { + "boot": 1590004492000, + "disk_partitions": [ + { + "device": "/dev/sda2", + "fstype": "ext4", + "mountpoint": "/" + }, + { + "device": "/dev/sda1", + "fstype": "ext3", + "mountpoint": "/boot" + } + ], + "id": 1, + "network": { + "default": "lo", + "interfaces": [ + { + "ipv4": { + "address": "127.0.0.1", + "netmask": "255.0.0.0", + "prefixlen": 8 + }, + "ipv6": { + "address": "::1", + "netmask": 
"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + "prefixlen": 128 + }, + "mac": "00:00:00:00:00:00", + "name": "lo" + }, + { + "ipv4": { + "address": "192.0.10.1", + "netmask": "255.255.255.0", + "prefixlen": 24 + }, + "mac": "02:42:98:fb:40:48", + "name": "docker0" + }, + { + "ipv4": { + "address": "10.149.41.181", + "netmask": "255.255.240.0", + "prefixlen": 20 + }, + "ipv6": { + "address": "fe80::250:56ff:fe98:e2f1", + "netmask": "ffff:ffff:ffff:ffff::", + "prefixlen": 64 + }, + "mac": "00:50:56:98:e2:f1", + "name": "ens32" + } + ] + }, + "os-type": "linux", + "processor": { + "architecture": "x86_64", + "cache": { + "L1d": "32K", + "L1i": "32K", + "L2": "1024K", + "L3": "25344K" + }, + "cores": "1", + "cpus": "4", + "hypervisor": "VMware", + "mhz": "2300.000", + "model": "Intel(R) Xeon(R) Gold 6140 CPU @ 2.30GHz", + "virtualization": "full" + }, + "release": { + "codename": "bionic", + "id": "ubuntu", + "name": "Ubuntu", + "version": "18.04.2 LTS (Bionic Beaver)", + "version_id": "18.04" + } + }, + "muted": false, + "version": "1.17.9" + } + } + }, + "ListInstanceResponse": { + "value": { + "items": [ + { + "currentStatus": { + "agent": { + "version": "3.7.44" + }, + "hostname": "i-563457274582", + "legacyNginxMetadata": { + "build": "nginx-plus-r22", + "built_from_source": false, + "id": 2, + "last_seen": "2020-07-08T17:24:07.869745Z", + "pid": 3104, + "properties": { + "conf-path": "/etc/nginx/nginx.conf", + "error-log-path": "/var/log/nginx/error.log", + "group": "nginx", + "http-client-body-temp-path": "/var/cache/nginx/client_temp", + "http-fastcgi-temp-path": "/var/cache/nginx/fastcgi_temp", + "http-log-path": "/var/log/nginx/access.log", + "http-proxy-temp-path": "/var/cache/nginx/proxy_temp", + "http-scgi-temp-path": "/var/cache/nginx/scgi_temp", + "http-uwsgi-temp-path": "/var/cache/nginx/uwsgi_temp", + "loadable_modules": [ + "ngx_http_f5_metrics_module-debug.so", + "ngx_http_f5_metrics_module.so" + ], + "lock-path": "/var/run/nginx.lock", + 
"modules-path": "/usr/lib/nginx/modules", + "packages": {}, + "path": { + "bin": "/usr/sbin/nginx", + "conf": "/etc/nginx/nginx.conf" + }, + "pid-path": "/var/run/nginx.pid", + "prefix": "/etc/nginx", + "sbin-path": "/usr/sbin/nginx", + "status_module_enabled": false, + "stub_status_enabled": false, + "stub_status_url": "", + "user": "nginx", + "with-compat": true, + "with-file-aio": true, + "with-http_addition_module": true, + "with-http_auth_jwt_module": true, + "with-http_auth_request_module": true, + "with-http_dav_module": true, + "with-http_f4f_module": true, + "with-http_flv_module": true, + "with-http_gunzip_module": true, + "with-http_gzip_static_module": true, + "with-http_hls_module": true, + "with-http_mp4_module": true, + "with-http_random_index_module": true, + "with-http_realip_module": true, + "with-http_secure_link_module": true, + "with-http_session_log_module": true, + "with-http_slice_module": true, + "with-http_ssl_module": true, + "with-http_stub_status_module": true, + "with-http_sub_module": true, + "with-http_v2_module": true, + "with-ld-opt": "'-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now -Wl,--as-needed -pie'", + "with-mail": true, + "with-mail_ssl_module": true, + "with-stream": true, + "with-stream_realip_module": true, + "with-stream_ssl_module": true, + "with-stream_ssl_preread_module": true, + "with-threads": true + }, + "running": true, + "ssl": { + "built": [ + "OpenSSL", + "1.1.1", + "", + "11", + "Sep", + "2018" + ], + "run": [ + "OpenSSL", + "1.1.1", + "", + "11", + "Sep", + "2018" + ] + }, + "start_time": 0 + }, + "legacySystemMetadata": { + "boot": 1594225800000, + "disk_partitions": [ + { + "device": "/dev/sda2", + "fstype": "ext4", + "mountpoint": "/" + }, + { + "device": "/dev/sda1", + "fstype": "ext3", + "mountpoint": "/boot" + } + ], + "id": 1, + "network": { + "default": "", + "interfaces": [ + { + "ipv4": { + "address": "127.0.0.1", + "netmask": "255.0.0.0", + "prefixlen": 8 + }, + "ipv6": { + "address": "::1", + 
"netmask": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + "prefixlen": 128 + }, + "mac": "00:00:00:00:00:00", + "name": "lo" + }, + { + "ipv4": { + "address": "10.149.41.97", + "netmask": "255.255.240.0", + "prefixlen": 20 + }, + "ipv6": { + "address": "fe80::250:56ff:fe98:b512", + "netmask": "ffff:ffff:ffff:ffff::", + "prefixlen": 64 + }, + "mac": "00:50:56:98:b5:12", + "name": "ens32" + }, + { + "ipv4": { + "address": "192.0.10.1", + "netmask": "255.255.255.0", + "prefixlen": 24 + }, + "mac": "02:42:24:f1:ae:8a", + "name": "docker0" + } + ] + }, + "os-type": "linux", + "processor": { + "architecture": "x86_64", + "cache": { + "L1d": "32K", + "L1i": "32K", + "L2": "1024K", + "L3": "25344K" + }, + "cores": "1", + "cpus": "4", + "hypervisor": "VMware", + "mhz": "2300.000", + "model": "Intel(R) Xeon(R) Gold 6140 CPU @ 2.30GHz", + "virtualization": "full" + }, + "release": { + "codename": "bionic", + "id": "ubuntu", + "name": "Ubuntu", + "version": "18.04.2 LTS (Bionic Beaver)", + "version_id": "18.04" + } + }, + "muted": false, + "state": { + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + }, + "status": { + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + }, + "type": "OTHER_INSTANCE", + "version": "1.19.0" + }, + "desiredState": { + "type": "OTHER_INSTANCE" + }, + "metadata": { + "createTime": "2020-07-08T16:42:07.97301Z", + "displayName": "Test Instance 1", + "kind": "instance", + "links": { + "rel": "/api/v1/infrastructure/locations/unspecified/instances/instance-1" + }, + "name": "instance-1", + "ref": "/infrastructure/locations/unspecified/instances/instance-1", + "uid": "ec8d8dac-10b6-4195-943b-1a5d65dd131c", + "updateTime": "2020-07-08T16:42:07.97301Z" + } + 
} + ] + } + }, + "InstanceGroupRequest": { + "value": { + "metadata": { + "name": "k8s-nginx-deploy", + "displayName": "K8S NGINX+ deployment", + "description": "k8s-nginx-deploy" + }, + "desiredState": {} + } + }, + "GetInstanceGroupResponse": { + "value": { + "metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "K8S NGINX+ deployment", + "displayName": "k8s-nginx-deploy", + "kind": "instance-group", + "links": { + "rel": "/api/v1/infrastructure/instance-groups/k8s-nginx-deploy" + }, + "name": "k8s-nginx-deploy", + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + }, + "desiredState": {}, + "currentStatus": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/eks-cluster/instances/30dc361a3729" + }, + { + "ref": "/infrastructure/locations/eks-cluster/instances/01a9eead50e5" + } + ], + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + } + } + }, + "ListInstanceGroupsResponse": { + "value": { + "items": [ + { + "currentStatus": { + "instanceRefs": [], + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + }, + "desiredState": {}, + "metadata": { + "createTime": "2020-05-13T09:29:49.464273Z", + "description": "us-west-2 autoscale group", + "displayName": "aws-autoscale-group", + "kind": "instance-group", + "links": { + "rel": "/api/v1/infrastructure/instance-groups/amz-us-west-2-as-group" + }, + "name": "amz-us-west-2-as-group", + "uid": "802ef1f8-9105-474a-b9a7-599837efd6b4", + "updateTime": "2020-05-13T09:29:49.464273Z" + } + }, + { + "currentStatus": { + "instanceRefs": [ + { + "ref": 
"/infrastructure/locations/eks-cluster/instances/30dc361a3729" + }, + { + "ref": "/infrastructure/locations/eks-cluster/instances/01a9eead50e5" + } + ], + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + }, + "desiredState": {}, + "metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "K8S NGINX+ deployment", + "displayName": "k8s-nginx-deploy", + "kind": "instance-group", + "links": { + "rel": "/api/v1/infrastructure/instance-groups/k8s-nginx-deploy" + }, + "name": "k8s-nginx-deploy", + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + } + } + ] + } + }, + "IdentityProviderRequest": { + "value": { + "metadata": { + "name": "resource-name", + "displayName": "My Display Name", + "description": "This is a sample description string. It provides information about the resource.", + "tags": [ + "production_public", + "dev", + "new_app", + "us-west-1", + "emea" + ] + }, + "desiredState": { + "environmentRefs": [ + { + "ref": "/services/environments/dev" + } + ], + "identityProvider": { + "type": "API_KEY" + } + } + } + }, + "IdentityProviderClientListRequest": { + "value": { + "items": [ + { + "metadata": { + "name": "resource-name", + "displayName": "My Display Name", + "description": "This is a sample description string. It provides information about the resource.", + "tags": [ + "production_public", + "dev", + "new_app", + "us-west-1", + "emea" + ] + }, + "desiredState": { + "credential": { + "type": "API_KEY", + "apiKey": "ADv2ZheQnLjVx5klhQ39" + } + } + } + ] + } + }, + "IdentityProviderClientRequest": { + "value": { + "metadata": { + "name": "resource-name", + "displayName": "My Display Name", + "description": "This is a sample description string. 
It provides information about the resource.", + "tags": [ + "production_public", + "dev", + "new_app", + "us-west-1", + "emea" + ] + }, + "desiredState": { + "credential": { + "type": "API_KEY", + "apiKey": "ADv2ZheQnLjVx5klhQ39" + } + } + } + }, + "IdentityProviderClientPatchMetadataRequest": { + "value": { + "metadata": { + "name": "resource-name", + "description": "This is a sample description string. It provides information about the resource." + } + } + }, + "IdentityProviderClientPatchDesiredStateRequest": { + "value": { + "desiredState": { + "credential": { + "type": "API_KEY", + "apiKey": "ADv2ZheQnLjVx5klhQ39" + } + } + } + }, + "ComponentRequest": { + "value": { + "metadata": { + "name": "resource-name", + "displayName": "My Display Name", + "description": "This is a sample description string. It provides information about the resource.", + "tags": [ + "production_public", + "dev", + "new_app", + "us-west-1", + "emea" + ] + }, + "desiredState": { + "ingress": { + "uris": { + "/api/golf/": { + "matchMethod": "PREFIX" + } + }, + "gatewayRefs": [ + { + "ref": "/services/environments/dev/gateways/sportsgw" + } + ] + }, + "publishedApiRefs": [ + { + "ref": "/services/environments/dev/apps/sports/published-apis/golf-pub" + } + ], + "backend": { + "workloadGroups": { + "serverGrp1": { + "uris": { + "{{httpWorkloadInstance1}}": {} + } + } + }, + "monitoring": {} + }, + "security": { + "rateLimit": { + "defaultLimit": { + "rate": 1, + "rateMeasure": "SECONDS" + } + } + } + } + } + }, + "OtherLocationRequest": { + "value": { + "metadata": { + "name": "my-other-location", + "description": "Other Location for managing instances", + "displayName": "OtherLocation-1", + "tags": [ + "dev", + "prod" + ] + }, + "desiredState": { + "type": "OTHER_LOCATION" + } + } + }, + "AWSLocationRequest": { + "value": { + "metadata": { + "name": "my-aws-location", + "description": "AWS Location for us-east-1", + "displayName": "us-east-1-location", + "tags": [ + "dev", + "prod" + ] + }, 
+ "desiredState": { + "type": "AWS_LOCATION", + "vpcID": "vpc-1234", + "region": "us-east-1", + "integrationRef": { + "ref": "/api/v1/platform/integrations/my-aws-integration" + } + } + } + }, + "AzureLocationRequest": { + "value": { + "metadata": { + "name": "my-azure-location", + "description": "Azure Location for West US 2", + "displayName": "westus2-location", + "tags": [ + "dev", + "prod" + ] + }, + "desiredState": { + "type": "AZURE_LOCATION", + "resourceGroup": "myResourceGroup", + "subscriptionID": "mySubscriptionID", + "region": "westus2", + "integrationRef": { + "ref": "/api/v1/platform/integrations/my-azure-integration" + } + } + } + }, + "OtherLocationResponse": { + "value": { + "currentStatus": { + "type": "OTHER_LOCATION" + }, + "desiredState": { + "type": "OTHER_LOCATION" + }, + "metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "Other Location for managing instances", + "displayName": "OtherLocation-1", + "kind": "location", + "links": { + "rel": "/api/v1/infrastructure/locations/my-other-location" + }, + "name": "my-other-location", + "tags": [ + "dev", + "prod" + ], + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + } + } + }, + "AWSLocationResponse": { + "value": { + "currentStatus": { + "integrationRef": { + "ref": "/api/v1/platform/integrations/my-aws-integration" + }, + "region": "us-east-1", + "type": "AWS_LOCATION", + "vpcID": "vpc-1234" + }, + "desiredState": { + "integrationRef": { + "ref": "/api/v1/platform/integrations/my-aws-integration" + }, + "region": "us-east-1", + "type": "AWS_LOCATION", + "vpcID": "vpc-1234" + }, + "metadata": { + "createTime": "2020-05-13T16:57:02.931198Z", + "description": "AWS Location for us-east-1", + "displayName": "us-east-1-location", + "kind": "location", + "links": { + "rel": "/api/v1/infrastructure/locations/my-aws-location" + }, + "name": "my-aws-location", + "tags": [ + "dev", + "prod" + ], + "uid": 
"62f72025-59a1-4c11-8cca-798b5e12efb8", + "updateTime": "2020-05-13T16:57:02.931198Z" + } + } + }, + "AzureLocationResponse": { + "value": { + "currentStatus": { + "integrationRef": { + "ref": "/api/v1/platform/integrations/my-azure-integration" + }, + "type": "AZURE_LOCATION", + "subscriptionID": "mySubscriptionID", + "region": "westus2" + }, + "desiredState": { + "integrationRef": { + "ref": "/api/v1/platform/integrations/my-azure-integration" + }, + "type": "AZURE_LOCATION", + "subscriptionID": "mySubscriptionID", + "region": "westus2" + }, + "metadata": { + "createTime": "2020-05-13T16:57:02.931198Z", + "description": "Azure Location for West US 2", + "displayName": "westus2-location", + "kind": "location", + "links": { + "rel": "/api/v1/infrastructure/locations/my-azure-location" + }, + "name": "my-azure-location", + "tags": [ + "dev", + "prod" + ], + "uid": "62f72025-59a1-4c11-8cca-798b5e12efb8", + "updateTime": "2020-05-13T16:57:02.931198Z" + } + } + }, + "ListLocationResponse": { + "value": { + "items": [ + { + "currentStatus": { + "type": "OTHER_LOCATION" + }, + "desiredState": { + "type": "OTHER_LOCATION" + }, + "metadata": { + "createTime": "2020-05-13T09:29:49.464273Z", + "description": "Location for instances where location has not been specified", + "displayName": "Unspecified (default)", + "kind": "location", + "links": { + "rel": "/api/v1/infrastructure/locations/unspecified" + }, + "name": "unspecified", + "tags": [ + "default" + ], + "uid": "802ef1f8-9105-474a-b9a7-599837efd6b4", + "updateTime": "2020-05-13T09:29:49.464273Z" + } + }, + { + "currentStatus": { + "type": "OTHER_LOCATION" + }, + "desiredState": { + "type": "OTHER_LOCATION" + }, + "metadata": { + "createTime": "2020-05-13T16:58:17.058124Z", + "description": "Other Location for managing instances", + "displayName": "OtherLocation-1", + "kind": "location", + "links": { + "rel": "/api/v1/infrastructure/locations/my-other-location" + }, + "name": "my-other-location", + "tags": [ + "dev", + 
"prod" + ], + "uid": "97d06b70-0d00-4863-b86d-40ab29efefdc", + "updateTime": "2020-05-13T16:58:17.058124Z" + } + } + ] + } + }, + "AWSRequest": { + "value": { + "metadata": { + "name": "my-instance-template", + "description": "AWS Instance Template for T2 large", + "displayName": "T2large-InstanceTemplate", + "tags": [ + "dev", + "prod" + ] + }, + "desiredState": { + "type": "AWS_INSTANCE_TEMPLATE", + "amiID": "ami-a0cfeed8", + "instanceType": "t2.large", + "subnetID": "subnet-12345678", + "securityGroupIDs": [ + "sg-12345678", + "sg-98765432" + ], + "publicKey": "my-public-key", + "associatePublicIPAddress": true + } + } + }, + "AzureRequestWithMarketplaceImageAndUsingExistingNic": { + "value": { + "metadata": { + "name": "my-azure-template-for-standard-A1", + "description": "Azure Instance Template for Standard A1", + "displayName": "Standard_A1-instance-template", + "tags": [ + "dev", + "prod" + ] + }, + "desiredState": { + "type": "AZURE_INSTANCE_TEMPLATE", + "image": { + "type": "AZURE_IMAGE_REFERENCE", + "publisher": "nginxinc", + "offer": "nginx-plus-v1", + "sku": "nginx-plus-ub1804", + "version": "latest" + }, + "instanceType": "Standard_A1", + "adminUser": "azureuser", + "publicKey": "ssh-rsa my-public-key-string", + "networkInterface": { + "type": "AZURE_NIC_ID", + "nicID": "/subscriptions/mySubscriptionID/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/myNicID" + } + } + } + }, + "AzureRequestWithCustomImageAndCreatingNewNicAndPublicIP": { + "value": { + "metadata": { + "name": "my-azure-template-for-standard-DS1_v2", + "description": "Azure Instance Template for Standard DS1v2", + "displayName": "DS1_v2-instance-template", + "tags": [ + "dev", + "prod" + ] + }, + "desiredState": { + "type": "AZURE_INSTANCE_TEMPLATE", + "image": { + "type": "AZURE_IMAGE_ID", + "imageID": "/subscriptions/mySubscriptionID/resourceGroups/myResourceGroup/providers/Microsoft.Compute/images/myCustomImageID" + }, + "instanceType": "Standard_DS1_v2", 
+ "adminUser": "azureuser", + "publicKey": "ssh-rsa my-public-key-string", + "networkInterface": { + "type": "AZURE_NIC_CONFIG", + "virtualNetwork": "my-virtual-network", + "subnet": "my-subnet", + "securityGroup": "my-network-sg", + "publicIp": true + } + } + } + }, + "AWSResponse": { + "value": { + "currentStatus": { + "amiID": "ami-a0cfeed8", + "associatePublicIPAddress": true, + "instanceType": "t2.large", + "publicKey": "my-public-key", + "securityGroupIDs": [ + "sg-12345678", + "sg-98765432" + ], + "subnetID": "subnet-12345678", + "type": "AWS_INSTANCE_TEMPLATE" + }, + "desiredState": { + "amiID": "ami-a0cfeed8", + "associatePublicIPAddress": true, + "instanceType": "t2.large", + "publicKey": "my-public-key", + "securityGroupIDs": [ + "sg-12345678", + "sg-98765432" + ], + "subnetID": "subnet-12345678", + "type": "AWS_INSTANCE_TEMPLATE" + }, + "metadata": { + "createTime": "2020-04-17T02:18:44.394232Z", + "description": "AWS Instance Template for T2 large", + "displayName": "T2large-InstanceTemplate", + "kind": "instance-template", + "links": { + "rel": "/api/v1/infrastructure/locations/location-1/instance-templates/my-instance-template" + }, + "name": "my-instance-template", + "uid": "4157d480-cd0e-40a0-8ba8-d0d5ce17c5d6", + "updateTime": "2020-04-17T02:18:44.394232Z" + } + } + }, + "AzureResponse": { + "value": { + "currentStatus": { + "type": "AZURE_INSTANCE_TEMPLATE", + "image": { + "type": "AZURE_IMAGE_REFERENCE", + "publisher": "nginxinc", + "offer": "nginx-plus-v1", + "sku": "nginx-plus-ub1804", + "version": "latest" + }, + "instanceType": "Standard_A1", + "adminUser": "azureuser", + "publicKey": "ssh-rsa my-public-key-string", + "networkInterface": { + "type": "AZURE_NIC_ID", + "nicID": "/subscriptions/mySubscriptionID/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/myNicID" + } + }, + "desiredState": { + "type": "AZURE_INSTANCE_TEMPLATE", + "image": { + "type": "AZURE_IMAGE_REFERENCE", + "publisher": "nginxinc", + "offer": 
"nginx-plus-v1", + "sku": "nginx-plus-ub1804", + "version": "latest" + }, + "instanceType": "Standard_A1", + "adminUser": "azureuser", + "publicKey": "ssh-rsa my-public-key-string", + "networkInterface": { + "type": "AZURE_NIC_ID", + "nicID": "/subscriptions/mySubscriptionID/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/myNicID" + } + }, + "metadata": { + "createTime": "2020-04-17T02:18:44.394232Z", + "description": "Azure Instance Template for Standard A1", + "displayName": "Standard_A1-instance-template", + "kind": "instance-template", + "links": { + "rel": "/api/v1/infrastructure/locations/azure-westus2/instance-templates/my-azure-template-for-standard-A1" + }, + "name": "my-azure-template-for-standard-A1", + "uid": "4157d480-cd0e-40a0-8ba8-d0d5ce17c5d6", + "updateTime": "2020-04-17T02:18:44.394232Z" + } + } + }, + "AWSListResponse": { + "value": { + "items": [ + { + "currentStatus": { + "amiID": "ami-a0cfeed8", + "associatePublicIPAddress": true, + "instanceType": "t2.large", + "publicKey": "my-public-key", + "securityGroupIDs": [ + "sg-12345678", + "sg-98765432" + ], + "subnetID": "subnet-12345678", + "type": "AWS_INSTANCE_TEMPLATE" + }, + "desiredState": { + "amiID": "ami-a0cfeed8", + "associatePublicIPAddress": true, + "instanceType": "t2.large", + "publicKey": "my-public-key", + "securityGroupIDs": [ + "sg-12345678", + "sg-98765432" + ], + "subnetID": "subnet-12345678", + "type": "AWS_INSTANCE_TEMPLATE" + }, + "metadata": { + "createTime": "2020-04-17T02:18:44.394232Z", + "description": "AWS Instance Template for T2 large", + "displayName": "T2large-InstanceTemplate", + "kind": "instance-template", + "links": { + "rel": "/api/v1/infrastructure/locations/location-1/instance-templates/my-instance-template" + }, + "name": "my-instance-template", + "uid": "4157d480-cd0e-40a0-8ba8-d0d5ce17c5d6", + "updateTime": "2020-04-17T02:18:44.394232Z" + } + } + ] + } + } + } + } +} \ No newline at end of file diff --git 
a/content/controller/api/reference/ctlr-platform-api.md b/content/controller/api/reference/ctlr-platform-api.md new file mode 100644 index 000000000..dcf93ec2b --- /dev/null +++ b/content/controller/api/reference/ctlr-platform-api.md @@ -0,0 +1,14 @@ +--- +description: Represents the state of the F5 NGINX Controller Platform REST API. +docs: DOCS-1278 +doctypes: + - reference +type: redoc +tags: + - api +title: Platform API +toc: false +weight: 100 +--- + +{{< openapi spec="/controller/api/reference/ctlr-platform-openapi.json" >}} diff --git a/content/controller/api/reference/ctlr-platform-openapi.json b/content/controller/api/reference/ctlr-platform-openapi.json new file mode 100644 index 000000000..b0e568961 --- /dev/null +++ b/content/controller/api/reference/ctlr-platform-openapi.json @@ -0,0 +1,7437 @@ +{ + "openapi": "3.0.0", + "info":{ + "title": "NGINX Controller Platform REST API", + "version": "v1", + "description": "Manage the NGINX Controller platform." + }, + "tags": [ + + { + "name": "Authentication Providers", + "description": "Use the Authentication Providers API to create, update, or remove authentication providers." + }, + { + "name": "Login Providers", + "description": "Use the Login Providers API to retrieve a list of the available authentication providers." + }, + { + "name": "Login", + "description": "Use the Login API to retrieve an authentication cookie. Include the session ID that's in the authentication cookie in the request header for all subsequent requests." + }, + { + "name": "Reset Password", + "description": "Use the Reset Password API to manage NGINX Controller password recovery." + }, + { + "name": "Verify Authorization", + "description": "Verifies the current session is authorized to access a list of resources." + }, + { + "name": "Nodes", + "description": "Use the Nodes API to manage NGINX Controller control plane nodes." 
+ }, + { + "name": "Global Settings", + "description": "Use the Global Settings API to manage the system-wide configurations for NGINX Controller and Controller Agent." + }, + { + "name": "Global Features", + "description": "Use the Features API to manage NGINX Controller features." + }, + { + "name": "License", + "description": "Use the License API to manage the license(s) for NGINX Controller." + }, + { + "name": "Integrations", + "description": "Use the Integrations API to manage integrated cloud provider or data collector accounts." + }, + { + "name": "Users", + "description": "Use the Users API to manage NGINX Controller User accounts." + }, + { + "name": "Roles", + "description": "Use the Roles API to manage NGINX Controller User Roles." + }, + { + "name": "Groups", + "description": "Use the Groups API to manage NGINX Controller authentication groups." + } + ], + "servers": [ + { + "description": "NGINX Controller API", + "url": "https://{{CONTROLLER_FQDN}}/api/v1" + }], + "paths": { + "/platform/auth/password-recovery": { + "post": { + "tags": [ + "Password" + ], + "summary": "Request password reset", + "description": "Creates a password recovery code for user with given email and sends an email with reset link. The reset link is valid for an hour.", + "operationId": "requestResetPassword", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ResetPasswordRequest" + }, + "example": { + "metadata": { + "name": "user@example.com" + } + } + } + } + }, + "responses": { + "204": { + "description": "Successfully created the password recovery code. No content was returned." 
+ } + } + } + }, + "/platform/auth/password-recovery/{code}": { + "put": { + "tags": [ + "Password" + ], + "summary": "Password reset", + "description": "Check if the new password satisfies the policy constraints and update the password for the user associated with the recovery code.", + "operationId": "resetPassword", + "parameters": [ + { + "$ref": "#/components/parameters/code" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ResetPassword" + }, + "example": { + "desiredState": { + "password": "********" + }, + "metadata": { + "name": "user@example.com" + } + } + } + } + }, + "responses": { + "204": { + "description": "Successfully updated the password for the user. No content was returned." + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + } + }, + "/platform/auth/providers": { + "post": { + "tags": [ + "Authentication Providers" + ], + "summary": "Create an Authentication Provider", + "description": "Creates a new authentication provider.\n", + "operationId": "configureAuthProvider", + "requestBody": { + "description": "Contains the configuration for a supported authentication provider.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AuthProvider" + }, + "example": { + "desiredState": { + "provider": { + "type": "ACTIVE_DIRECTORY", + "domain": "DC=mydomain,DC=example,DC=com", + "defaultLoginDomain": "mydomain", + "connection": [ + { + "uri": "ldaps://dc1.mydomain.com", + "sslMode": "VERIFY_CA", + "rawCa": "-----BEGIN CERTIFICATE-----\n MIIDMTCCasdfAwIBAgIUWw25xyNvQPxtjGUZopz0KeoaaQkwDQYJKoZIhvcNAQEL\n [SNIP]\n 2CPqdhpmQhAbzx9ElT8KyC7/08IdV0JK/kSWjfE4jOPOJyI2q0DWExKLuYe+rO+Q\n rg7hesA=\n -----END CERTIFICATE-----" + } + ], + "groupSearchFilter": "(objectClass=group)", + "groupMemberAttribute": "memberof", + "userFormat": "UPN", + "bindUser": { + "type": "PASSWORD", + 
"username": "user@mydomain", + "password": "********" + }, + "pollIntervalSec": 3600, + "groupCacheTimeSec": 7200, + "honorStaleGroups": false, + "groupMappings": [ + { + "external": "Engineering-US-West", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-west" + } + }, + { + "external": "Engineering-US-East", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-east" + } + } + ] + } + }, + "metadata": { + "kind": "auth-provider", + "name": "ad-dc-1", + "tags": [ + "dev", + "us-1" + ] + } + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the authentication provider.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AuthProvider" + }, + "example": { + "currentStatus": { + "provider": { + "type": "ACTIVE_DIRECTORY", + "status": "CONNECTED_BOUND", + "domain": "DC=mydomain,DC=example,DC=com", + "defaultLoginDomain": "mydomain", + "connection": [ + { + "uri": "ldap://dc1.mydomain.com", + "sslMode": "REQUIRE", + "rawCa": "-----BEGIN CERTIFICATE-----\n MIIDMTCCasdfAwIBAgIUWw25xyNvQPxtjGUZopz0KeoaaQkwDQYJKoZIhvcNAQEL\n [SNIP]\n 2CPqdhpmQhAbzx9ElT8KyC7/08IdV0JK/kSWjfE4jOPOJyI2q0DWExKLuYe+rO+Q\n rg7hesA=\n -----END CERTIFICATE-----" + } + ], + "groupSearchFilter": "(objectClass=group)", + "groupMemberAttribute": "memberof", + "userFormat": "UPN", + "bindUser": { + "type": "PASSWORD", + "username": "user@mydomain", + "password": "********" + }, + "pollIntervalSec": 3600, + "groupCacheTimeSec": 7200, + "honorStaleGroups": false, + "groupMappings": [ + { + "external": "Engineering-US-West", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-west", + "links": { + "rel": "/api/v1/platform/auth/groups/eng-west", + "name": "eng-west", + "displayName": "West engineering" + } + } + }, + { + "external": "Engineering-US-East", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-east", + "links": { + "rel": 
"/api/v1/platform/auth/groups/eng-east", + "name": "eng-east", + "displayName": "East engineering" + } + } + } + ] + } + }, + "desiredState": { + "provider": { + "type": "ACTIVE_DIRECTORY", + "domain": "DC=mydomain,DC=example,DC=com", + "defaultLoginDomain": "mydomain", + "connection": [ + { + "uri": "ldap://dc1.mydomain.com", + "sslMode": "REQUIRE", + "rawCa": "-----BEGIN CERTIFICATE-----\n MIIDMTCCasdfAwIBAgIUWw25xyNvQPxtjGUZopz0KeoaaQkwDQYJKoZIhvcNAQEL\n [SNIP]\n 2CPqdhpmQhAbzx9ElT8KyC7/08IdV0JK/kSWjfE4jOPOJyI2q0DWExKLuYe+rO+Q\n rg7hesA=\n -----END CERTIFICATE-----" + } + ], + "groupSearchFilter": "(objectClass=group)", + "groupMemberAttribute": "memberof", + "userFormat": "UPN", + "bindUser": { + "type": "PASSWORD", + "username": "user@mydomain", + "password": "********" + }, + "pollIntervalSec": 3600, + "groupCacheTimeSec": 7200, + "honorStaleGroups": false, + "groupMappings": [ + { + "external": "Engineering-US-West", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-west" + } + }, + { + "external": "Engineering-US-East", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-east" + } + } + ] + } + }, + "metadata": { + "createTime": "2020-02-24T22:55:51.729272Z", + "kind": "auth-provider", + "name": "ad-dc-1", + "tags": [ + "dev", + "us-1" + ] + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "409": { + "$ref": "#/components/responses/Conflict" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + }, + "get": { + "tags": [ + "Authentication Providers" + ], + "summary": "List all Authentication Providers", + "description": "Returns a list of all authentication providers.\n", + "operationId": "listAuthProviders", + "responses": { + "200": { + "description": "Successfully returned a list of authentication providers.\n", + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/ListAuthProviders" + }, + "example": { + "items": [ + { + "currentStatus": { + "provider": { + "type": "ACTIVE_DIRECTORY", + "status": "CONNECTED_BOUND", + "domain": "DC=mydomain,DC=example,DC=com", + "defaultLoginDomain": "mydomain", + "connection": [ + { + "uri": "ldap://dc1.mydomain.com", + "sslMode": "REQUIRE", + "rawCa": "-----BEGIN CERTIFICATE-----\n MIIDMTCCasdfAwIBAgIUWw25xyNvQPxtjGUZopz0KeoaaQkwDQYJKoZIhvcNAQEL\n [SNIP]\n 2CPqdhpmQhAbzx9ElT8KyC7/08IdV0JK/kSWjfE4jOPOJyI2q0DWExKLuYe+rO+Q\n rg7hesA=\n -----END CERTIFICATE-----" + } + ], + "groupSearchFilter": "(objectClass=group)", + "groupMemberAttribute": "memberof", + "userFormat": "UPN", + "bindUser": { + "type": "PASSWORD", + "username": "user@mydomain", + "password": "********" + }, + "filteredGroups": [ + "CN=Office_Admins_SF,OU=San Francisco,OU=North America,OU=Acme Financial", + "CN=RM_SF,OU=San Francisco,OU=North America,OU=Acme Financial", + "CN=Sales_Warsaw,OU=Warsaw,OU=Europe,OU=Acme Financial", + "CN=ACC_EU,OU=Europe,OU=Acme Financial" + ], + "pollIntervalSec": 3600, + "groupCacheTimeSec": 7200, + "honorStaleGroups": false, + "groupMappings": [ + { + "external": "Engineering-US-West", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-west", + "links": { + "rel": "/api/v1/platform/auth/groups/eng-west", + "name": "eng-west", + "displayName": "West engineering" + } + } + }, + { + "external": "Engineering-US-East", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-east", + "links": { + "rel": "/api/v1/platform/auth/groups/eng-east", + "name": "eng-east", + "displayName": "East engineering" + } + } + } + ] + } + }, + "desiredState": { + "provider": { + "type": "ACTIVE_DIRECTORY", + "domain": "DC=mydomain,DC=example,DC=com", + "defaultLoginDomain": "mydomain", + "connection": [ + { + "uri": "ldap://dc1.mydomain.com", + "sslMode": "REQUIRE", + "rawCa": "-----BEGIN 
CERTIFICATE-----\n MIIDMTCCasdfAwIBAgIUWw25xyNvQPxtjGUZopz0KeoaaQkwDQYJKoZIhvcNAQEL\n [SNIP]\n 2CPqdhpmQhAbzx9ElT8KyC7/08IdV0JK/kSWjfE4jOPOJyI2q0DWExKLuYe+rO+Q\n rg7hesA=\n -----END CERTIFICATE-----" + } + ], + "groupSearchFilter": "(objectClass=group)", + "groupMemberAttribute": "memberof", + "userFormat": "UPN", + "bindUser": { + "type": "PASSWORD", + "username": "user@mydomain", + "password": "********" + }, + "filteredGroups": [ + "CN=Office_Admins_SF,OU=San Francisco,OU=North America,OU=Acme Financial", + "CN=RM_SF,OU=San Francisco,OU=North America,OU=Acme Financial", + "CN=Sales_Warsaw,OU=Warsaw,OU=Europe,OU=Acme Financial", + "CN=ACC_EU,OU=Europe,OU=Acme Financial" + ], + "pollIntervalSec": 3600, + "groupCacheTimeSec": 7200, + "honorStaleGroups": false, + "groupMappings": [ + { + "external": "Engineering-US-West", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-west" + } + }, + { + "external": "Engineering-US-East", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-east" + } + } + ] + } + }, + "metadata": { + "createTime": "2020-02-24T22:55:51.729272Z", + "kind": "auth-provider", + "name": "ad-dc-1", + "tags": [ + "dev", + "us-1" + ] + } + } + ] + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + } + }, + "/platform/auth/providers/{providerName}": { + "get": { + "tags": [ + "Authentication Providers" + ], + "summary": "Get an Authentication Provider", + "description": "Returns information about the specified authentication provider.", + "parameters": [ + { + "name": "providerName", + "in": "path", + "description": "The name of the authentication provider.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "operationId": "getProvider", + "responses": { + "200": { + 
"description": "Successfully returned the requested authentication provider.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AuthProvider" + }, + "example": { + "currentStatus": { + "provider": { + "type": "ACTIVE_DIRECTORY", + "status": "CONNECTED_BOUND", + "domain": "DC=mydomain,DC=example,DC=com", + "defaultLoginDomain": "mydomain", + "connection": [ + { + "uri": "ldap://dc1.mydomain.com", + "sslMode": "REQUIRE", + "rawCa": "-----BEGIN CERTIFICATE-----\n MIIDMTCCasdfAwIBAgIUWw25xyNvQPxtjGUZopz0KeoaaQkwDQYJKoZIhvcNAQEL\n [SNIP]\n 2CPqdhpmQhAbzx9ElT8KyC7/08IdV0JK/kSWjfE4jOPOJyI2q0DWExKLuYe+rO+Q\n rg7hesA=\n -----END CERTIFICATE-----" + } + ], + "groupSearchFilter": "(objectClass=group)", + "groupMemberAttribute": "memberof", + "userFormat": "UPN", + "bindUser": { + "type": "PASSWORD", + "username": "user@mydomain", + "password": "********" + }, + "pollIntervalSec": 3600, + "groupCacheTimeSec": 7200, + "honorStaleGroups": false, + "filteredGroups": [ + "CN=Office_Admins_SF,OU=San Francisco,OU=North America,OU=Acme Financial", + "CN=RM_SF,OU=San Francisco,OU=North America,OU=Acme Financial", + "CN=Sales_Warsaw,OU=Warsaw,OU=Europe,OU=Acme Financial", + "CN=ACC_EU,OU=Europe,OU=Acme Financial" + ], + "groupMappings": [ + { + "external": "Engineering-US-West", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-west", + "links": { + "rel": "/api/v1/platform/auth/groups/eng-west", + "name": "eng-west", + "displayName": "West engineering" + } + } + }, + { + "external": "Engineering-US-East", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-east", + "links": { + "rel": "/api/v1/platform/auth/groups/eng-east", + "name": "eng-east", + "displayName": "East engineering" + } + } + } + ] + } + }, + "desiredState": { + "provider": { + "type": "ACTIVE_DIRECTORY", + "domain": "DC=mydomain,DC=example,DC=com", + "defaultLoginDomain": "mydomain", + "connection": [ + { + "uri": 
"ldap://dc1.mydomain.com", + "sslMode": "REQUIRE", + "rawCa": "-----BEGIN CERTIFICATE-----\n MIIDMTCCasdfAwIBAgIUWw25xyNvQPxtjGUZopz0KeoaaQkwDQYJKoZIhvcNAQEL\n [SNIP]\n 2CPqdhpmQhAbzx9ElT8KyC7/08IdV0JK/kSWjfE4jOPOJyI2q0DWExKLuYe+rO+Q\n rg7hesA=\n -----END CERTIFICATE-----" + } + ], + "groupSearchFilter": "(objectClass=group)", + "groupMemberAttribute": "memberof", + "userFormat": "UPN", + "bindUser": { + "type": "PASSWORD", + "username": "user@mydomain", + "password": "********" + }, + "pollIntervalSec": 3600, + "groupCacheTimeSec": 7200, + "honorStaleGroups": false, + "filteredGroups": [ + "CN=Office_Admins_SF,OU=San Francisco,OU=North America,OU=Acme Financial", + "CN=RM_SF,OU=San Francisco,OU=North America,OU=Acme Financial", + "CN=Sales_Warsaw,OU=Warsaw,OU=Europe,OU=Acme Financial", + "CN=ACC_EU,OU=Europe,OU=Acme Financial" + ], + "groupMappings": [ + { + "external": "Engineering-US-West", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-west" + } + }, + { + "external": "Engineering-US-East", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-east" + } + } + ] + } + }, + "metadata": { + "createTime": "2020-02-24T22:55:51.729272Z", + "kind": "auth-provider", + "name": "ad-dc-1", + "tags": [ + "dev", + "us-1" + ] + } + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "404": { + "$ref": "#/components/responses/NotFound" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + }, + "patch": { + "tags": [ + "Authentication Providers" + ], + "summary": "Update an Authentication Provider", + "description": "Updates the configuration for an existing authentication provider.\n\n> **Note:** You can use PATCH to update one or more specific configurations. 
Any settings not specified in the request will not be changed.\n", + "parameters": [ + { + "name": "providerName", + "in": "path", + "description": "The name of the authentication provider.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "description": "Contains the desired configuration changes for an authentication provider.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AuthProvider" + }, + "example": { + "desiredState": { + "provider": { + "type": "ACTIVE_DIRECTORY", + "domain": "DC=mydomain,DC=example,DC=com", + "defaultLoginDomain": "mydomain", + "connection": [ + { + "uri": "ldap://dc1.mydomain.com", + "sslMode": "REQUIRE", + "rawCa": "-----BEGIN CERTIFICATE-----\n MIIDMTCCasdfAwIBAgIUWw25xyNvQPxtjGUZopz0KeoaaQkwDQYJKoZIhvcNAQEL\n [SNIP]\n 2CPqdhpmQhAbzx9ElT8KyC7/08IdV0JK/kSWjfE4jOPOJyI2q0DWExKLuYe+rO+Q\n rg7hesA=\n -----END CERTIFICATE-----" + } + ], + "groupSearchFilter": "(objectClass=group)", + "groupMemberAttribute": "memberof", + "userFormat": "UPN", + "bindUser": { + "type": "PASSWORD", + "username": "user@example.com", + "password": "********" + }, + "pollIntervalSec": 3600, + "groupCacheTimeSec": 7200, + "honorStaleGroups": false, + "groupMappings": [ + { + "external": "Engineering-US-West", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-west" + } + }, + { + "external": "Engineering-US-East", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-east" + } + } + ] + } + }, + "metadata": { + "kind": "auth-provider", + "name": "ad-dc-1", + "tags": [ + "dev", + "us-1" + ] + } + } + } + } + }, + "operationId": "partialUpdateProvider", + "responses": { + "200": { + "description": "Successfully updated the configuration for the authentication provider.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AuthProvider" + }, + "example": { + "currentStatus": { + "provider": { + "type": 
"ACTIVE_DIRECTORY", + "status": "CONNECTED_BOUND", + "domain": "DC=mydomain,DC=example,DC=com", + "defaultLoginDomain": "mydomain", + "connection": [ + { + "uri": "ldap://dc1.mydomain.com", + "sslMode": "REQUIRE", + "rawCa": "-----BEGIN CERTIFICATE-----\n MIIDMTCCasdfAwIBAgIUWw25xyNvQPxtjGUZopz0KeoaaQkwDQYJKoZIhvcNAQEL\n [SNIP]\n 2CPqdhpmQhAbzx9ElT8KyC7/08IdV0JK/kSWjfE4jOPOJyI2q0DWExKLuYe+rO+Q\n rg7hesA=\n -----END CERTIFICATE-----" + } + ], + "groupSearchFilter": "(objectClass=group)", + "groupMemberAttribute": "memberof", + "userFormat": "UPN", + "bindUser": { + "type": "PASSWORD", + "username": "user@mydomain", + "password": "********" + }, + "pollIntervalSec": 3600, + "groupCacheTimeSec": 7200, + "honorStaleGroups": false, + "groupMappings": [ + { + "external": "Engineering-US-West", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-west", + "links": { + "rel": "/api/v1/platform/auth/groups/eng-west", + "name": "eng-west", + "displayName": "West engineering" + } + } + }, + { + "external": "Engineering-US-East", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-east", + "links": { + "rel": "/api/v1/platform/auth/groups/eng-east", + "name": "eng-east", + "displayName": "East engineering" + } + } + } + ] + } + }, + "desiredState": { + "provider": { + "type": "ACTIVE_DIRECTORY", + "domain": "DC=mydomain,DC=example,DC=com", + "defaultLoginDomain": "mydomain", + "connection": [ + { + "uri": "ldap://dc1.mydomain.com", + "sslMode": "REQUIRE", + "rawCa": "-----BEGIN CERTIFICATE-----\n MIIDMTCCasdfAwIBAgIUWw25xyNvQPxtjGUZopz0KeoaaQkwDQYJKoZIhvcNAQEL\n [SNIP]\n 2CPqdhpmQhAbzx9ElT8KyC7/08IdV0JK/kSWjfE4jOPOJyI2q0DWExKLuYe+rO+Q\n rg7hesA=\n -----END CERTIFICATE-----" + } + ], + "groupSearchFilter": "(objectClass=group)", + "groupMemberAttribute": "memberof", + "userFormat": "UPN", + "bindUser": { + "type": "PASSWORD", + "username": "user@mydomain", + "password": "********" + }, + "pollIntervalSec": 3600, + 
"groupCacheTimeSec": 7200, + "honorStaleGroups": false, + "groupMappings": [ + { + "external": "Engineering-US-West", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-west" + } + }, + { + "external": "Engineering-US-East", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-east" + } + } + ] + } + }, + "metadata": { + "createTime": "2020-02-24T22:55:51.729272Z", + "kind": "auth-provider", + "name": "ad-dc-1", + "tags": [ + "dev", + "us-1" + ] + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "404": { + "$ref": "#/components/responses/NotFound" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + }, + "put": { + "tags": [ + "Authentication Providers" + ], + "summary": "Update an Authentication Provider", + "description": "Updates all of the configurations for an existing authentication provider.\n", + "parameters": [ + { + "name": "providerName", + "in": "path", + "description": "The name of the authentication provider.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "description": "Contains the desired configuration for the authentication provider.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AuthProvider" + }, + "example": { + "desiredState": { + "provider": { + "type": "ACTIVE_DIRECTORY", + "domain": "DC=mydomain,DC=example,DC=com", + "defaultLoginDomain": "mydomain", + "connection": [ + { + "uri": "ldap://dc1.mydomain.com", + "sslMode": "REQUIRE", + "rawCa": "-----BEGIN CERTIFICATE-----\n MIIDMTCCasdfAwIBAgIUWw25xyNvQPxtjGUZopz0KeoaaQkwDQYJKoZIhvcNAQEL\n [SNIP]\n 2CPqdhpmQhAbzx9ElT8KyC7/08IdV0JK/kSWjfE4jOPOJyI2q0DWExKLuYe+rO+Q\n rg7hesA=\n -----END CERTIFICATE-----" + } + ], + "groupSearchFilter": "(objectClass=group)", + "groupMemberAttribute": 
"memberof", + "userFormat": "UPN", + "bindUser": { + "type": "PASSWORD", + "username": "user@mydomain", + "password": "passwd1234" + }, + "pollIntervalSec": 3600, + "groupCacheTimeSec": 7200, + "honorStaleGroups": false, + "groupMappings": [ + { + "external": "Engineering-US-West", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-west" + } + }, + { + "external": "Engineering-US-East", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-east" + } + } + ] + } + }, + "metadata": { + "kind": "auth-provider", + "name": "ad-dc-1", + "tags": [ + "dev", + "us-1" + ] + } + } + } + } + }, + "operationId": "updateProvider", + "responses": { + "200": { + "description": "Successfully updated the configuration for the authentication provider.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AuthProvider" + }, + "example": { + "currentStatus": { + "provider": { + "type": "ACTIVE_DIRECTORY", + "status": "CONNECTED_BOUND", + "domain": "DC=mydomain,DC=example,DC=com", + "defaultLoginDomain": "mydomain", + "connection": [ + { + "uri": "ldap://dc1.mydomain.com", + "sslMode": "REQUIRE", + "rawCa": "-----BEGIN CERTIFICATE-----\n MIIDMTCCasdfAwIBAgIUWw25xyNvQPxtjGUZopz0KeoaaQkwDQYJKoZIhvcNAQEL\n [SNIP]\n 2CPqdhpmQhAbzx9ElT8KyC7/08IdV0JK/kSWjfE4jOPOJyI2q0DWExKLuYe+rO+Q\n rg7hesA=\n -----END CERTIFICATE-----" + } + ], + "groupSearchFilter": "(objectClass=group)", + "groupMemberAttribute": "memberof", + "userFormat": "UPN", + "bindUser": { + "type": "PASSWORD", + "username": "user@mydomain", + "password": "********" + }, + "pollIntervalSec": 3600, + "groupCacheTimeSec": 7200, + "honorStaleGroups": false, + "groupMappings": [ + { + "external": "Engineering-US-West", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-west", + "links": { + "rel": "/api/v1/platform/auth/groups/eng-west", + "name": "eng-west", + "displayName": "West engineering" + } + } + }, + { + "external": 
"Engineering-US-East", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-east", + "links": { + "rel": "/api/v1/platform/auth/groups/eng-east", + "name": "eng-east", + "displayName": "East engineering" + } + } + } + ] + } + }, + "desiredState": { + "provider": { + "type": "ACTIVE_DIRECTORY", + "domain": "DC=mydomain,DC=example,DC=com", + "defaultLoginDomain": "mydomain", + "connection": [ + { + "uri": "ldap://dc1.mydomain.com", + "sslMode": "REQUIRE", + "rawCa": "-----BEGIN CERTIFICATE-----\n MIIDMTCCasdfAwIBAgIUWw25xyNvQPxtjGUZopz0KeoaaQkwDQYJKoZIhvcNAQEL\n [SNIP]\n 2CPqdhpmQhAbzx9ElT8KyC7/08IdV0JK/kSWjfE4jOPOJyI2q0DWExKLuYe+rO+Q\n rg7hesA=\n -----END CERTIFICATE-----" + } + ], + "groupSearchFilter": "(objectClass=group)", + "groupMemberAttribute": "memberof", + "userFormat": "UPN", + "bindUser": { + "type": "PASSWORD", + "username": "user@mydomain", + "password": "********" + }, + "pollIntervalSec": 3600, + "groupCacheTimeSec": 7200, + "honorStaleGroups": false, + "groupMappings": [ + { + "external": "Engineering-US-West", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-west" + } + }, + { + "external": "Engineering-US-East", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-east" + } + } + ] + } + }, + "metadata": { + "createTime": "2020-02-24T22:55:51.729272Z", + "kind": "auth-provider", + "name": "ad-dc-1", + "tags": [ + "dev", + "us-1" + ] + } + } + } + } + }, + "201": { + "description": "Successfully created the configuration for the provider.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AuthProvider" + }, + "example": { + "currentStatus": { + "provider": { + "type": "ACTIVE_DIRECTORY", + "status": "CONNECTED_BOUND", + "domain": "DC=mydomain,DC=example,DC=com", + "defaultLoginDomain": "mydomain", + "connection": [ + { + "uri": "ldap://dc1.mydomain.com", + "sslMode": "REQUIRE", + "rawCa": "********" + } + ], + "groupSearchFilter": 
"(objectClass=group)", + "groupMemberAttribute": "memberof", + "userFormat": "UPN", + "bindUser": { + "type": "PASSWORD", + "username": "user@mydomain", + "password": "*******" + }, + "pollIntervalSec": 3600, + "groupCacheTimeSec": 7200, + "honorStaleGroups": false, + "groupMappings": [ + { + "external": "Engineering-US-West", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-west", + "links": { + "rel": "/api/v1/platform/auth/groups/eng-west", + "name": "eng-west", + "displayName": "West engineering" + } + } + }, + { + "external": "Engineering-US-East", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-east", + "links": { + "rel": "/api/v1/platform/auth/groups/eng-east", + "name": "eng-east", + "displayName": "East engineering" + } + } + } + ] + } + }, + "desiredState": { + "provider": { + "type": "ACTIVE_DIRECTORY", + "domain": "DC=mydomain,DC=example,DC=com", + "defaultLoginDomain": "mydomain", + "connection": [ + { + "uri": "ldap://dc1.mydomain.com", + "sslMode": "REQUIRE", + "rawCa": "********" + } + ], + "groupSearchFilter": "(objectClass=group)", + "groupMemberAttribute": "memberof", + "userFormat": "UPN", + "bindUser": { + "type": "PASSWORD", + "username": "user@mydomain", + "password": "*******" + }, + "pollIntervalSec": 3600, + "groupCacheTimeSec": 7200, + "honorStaleGroups": false, + "groupMappings": [ + { + "external": "Engineering-US-West", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-west" + } + }, + { + "external": "Engineering-US-East", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-east" + } + } + ] + } + }, + "metadata": { + "createTime": "2020-02-24T22:55:51.729272Z", + "kind": "auth-provider", + "name": "ad-dc-1", + "tags": [ + "dev", + "us-1" + ] + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": 
"#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + }, + "delete": { + "tags": [ + "Authentication Providers" + ], + "summary": "Delete an Authentication Provider", + "description": "Deletes the authentication provider.", + "parameters": [ + { + "name": "providerName", + "in": "path", + "description": "The name of the authentication provider.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "operationId": "disable", + "responses": { + "204": { + "description": "Successfully deleted the authentication provider." + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "404": { + "$ref": "#/components/responses/NotFound" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + } + }, + "/platform/auth/verify": { + "x-f5-experimental": true, + "post": { + "x-f5-experimental": true, + "tags": [ + "Verify Auth Permissions" + ], + "summary": "Request a collection of permitted actions", + "description": "Returns a list of authorized actions based on a list of requested actions and resources.\n", + "operationId": "doVerify", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VerifyList" + }, + "example": { + "items": [ + { + "path": "/services/environments/test1", + "method": "DELETE" + }, + { + "path": "/services/environments/test2", + "method": "PUT" + } + ] + } + } + } + }, + "responses": { + "200": { + "description": "Successfully retrieved a list of authorized actions.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VerifyList" + }, + "example": { + "items": [ + { + "path": "/services/environments/test1", + "method": "DELETE", + "permitted": false + }, + { + "path": "/services/environments/test2", + "method": "PUT", + "permitted": true + } + ] + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" 
+ }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + } + }, + "/platform/global": { + "get": { + "tags": [ + "Global Settings" + ], + "summary": "Get the Global Settings", + "operationId": "getGlobalSettings", + "description": "Gets information about the global settings for NGINX Controller.", + "responses": { + "200": { + "description": "Successfully retrieved the global settings for NGINX Controller.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GlobalSettings" + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + }, + "put": { + "tags": [ + "Global Settings" + ], + "operationId": "putGlobalSettings", + "description": "Updates the global settings used by NGINX Controller.", + "summary": "Update the Global Settings", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GlobalSettings" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the global settings for NGINX Controller.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GlobalSettings" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + }, + "patch": { + "tags": [ + "Global Settings" + ], + "operationId": "patchGlobalSettings", + "description": "Updates the global settings for NGINX Controller.", + "summary": "Update part of the Global Settings", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/GlobalSettings" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the global settings for NGINX Controller.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GlobalSettings" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + } + }, + "/platform/global/features": { + "x-f5-experimental": true, + "get": { + "x-f5-experimental": true, + "tags": [ + "Global Features" + ], + "summary": "Get the Global Features", + "operationId": "getGlobalFeatures", + "description": "Gets information about the global features for NGINX Controller.", + "responses": { + "200": { + "description": "Successfully retrieved the global features for NGINX Controller.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GlobalFeatures" + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + } + }, + "/platform/license": { + "get": { + "tags": [ + "License" + ], + "summary": "Get a License", + "description": "Gets information for the active NGINX Controller license.", + "operationId": "getLicense", + "responses": { + "200": { + "description": "Successfully retrieved the active Controller license.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LicenseResponse" + }, + "example": { + "metadata": { + "name": "license" + }, + "desiredState": { + "content": "******", + "items": [ + { + "expiry": "2021-05-10T00:00:00Z", + "instanceCount": 250, + "product": "NGINX Controller Load Balancing", + "serial": 20145, + "type": "PAID", + "version": 1, + "id": "ADC" + } + ] + }, + 
"currentStatus": { + "subscription": { + "id": "b6d7c577-b708-44ad-839c-9743f85fcf7c" + }, + "entitlement": { + "features": [ + { + "name": "NGINX Controller Load Balancing", + "limit": 20, + "unitOfMeasure": "WORKLOADS", + "type": "PAID", + "id": "ADC", + "expiry": "1996-02-26T00:00:00.000Z", + "gracePeriodDays": 30 + } + ] + }, + "state": { + "currentInstance": { + "id": "8ce9b80a-f7fc-48fd-ac28-8d5f3fe898d6", + "type": "NGINX Controller", + "status": "INVALID", + "version": "3.3.0", + "telemetryLastReported": "2021-05-10T00:00:00Z", + "features": [ + { + "name": "NGINX Controller Load Balancing", + "used": 5, + "aggregateUsed": 10, + "remaining": 5, + "unitOfMeasure": "WORKLOADS", + "id": "ADC", + "daysUntilExpiry": 20, + "gracePeriodRemainingDays": 30 + } + ], + "configState": { + "selfConfigState": { + "isConfigured": true, + "isConfiguring": false, + "isError": false, + "isDeleting": false + }, + "conditions": [] + } + } + }, + "items": [ + { + "expiry": "2021-05-10T00:00:00Z", + "instanceCount": 250, + "product": "NGINX Controller Load Balancing", + "serial": 20145, + "type": "PAID", + "version": 1, + "id": "ADC" + } + ] + } + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + }, + "put": { + "tags": [ + "License" + ], + "summary": "Upload a License", + "description": "Uploads an NGINX Controller license.\n\nProvide your NGINX Controller license in the JSON request body as a base64-encoded string or as an unencoded customer association token.\n", + "operationId": "putLicense", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PutLicenseRequest" + }, + "example": { + "metadata": { + "name": "license" + }, + "desiredState": { + "content": "" + } + } + } + } + }, + "responses": { + "200": { + "description": "Successfully uploaded the license.", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/LicenseResponse" + }, + "example": { + "metadata": { + "name": "license" + }, + "desiredState": { + "content": "******", + "items": [ + { + "expiry": "2021-05-10T00:00:00Z", + "instanceCount": 250, + "product": "NGINX Controller Load Balancing", + "serial": 20145, + "type": "PAID", + "version": 1, + "id": "ADC" + } + ] + }, + "currentStatus": { + "subscription": { + "id": "b6d7c577-b708-44ad-839c-9743f85fcf7c" + }, + "entitlement": { + "features": [ + { + "name": "NGINX Controller Load Balancing", + "limit": 20, + "unitOfMeasure": "WORKLOADS", + "type": "PAID", + "id": "ADC", + "expiry": "2021-02-26T00:00:00.000Z", + "gracePeriodDays": 30 + } + ] + }, + "state": { + "currentInstance": { + "id": "8ce9b80a-f7fc-48fd-ac28-8d5f3fe898d6", + "type": "NGINX Controller", + "status": "INVALID", + "version": "3.3.0", + "telemetryLastReported": "2021-05-10T00:00:00Z", + "features": [ + { + "name": "NGINX Controller Load Balancing", + "used": 5, + "aggregateUsed": 10, + "remaining": 5, + "unitOfMeasure": "WORKLOADS", + "id": "ADC", + "daysUntilExpiry": 20, + "gracePeriodRemainingDays": 0 + } + ], + "configState": { + "selfConfigState": { + "isConfigured": true, + "isConfiguring": false, + "isError": false, + "isDeleting": false + }, + "conditions": [] + } + } + }, + "items": [ + { + "expiry": "2021-05-10T00:00:00Z", + "instanceCount": 250, + "product": "NGINX Controller Load Balancing", + "serial": 20145, + "type": "PAID", + "version": 1, + "id": "ADC" + } + ] + } + } + } + } + }, + "202": { + "description": "The request to upload a license succeeded. 
The License resource will be created when the upload is complete.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LicenseResponse" + }, + "example": { + "metadata": { + "name": "license" + }, + "desiredState": { + "content": "******", + "items": [ + { + "expiry": "2021-05-10T00:00:00Z", + "instanceCount": 250, + "product": "NGINX Controller Load Balancing", + "serial": 20145, + "type": "PAID", + "version": 1, + "id": "ADC" + } + ] + }, + "currentStatus": { + "subscription": { + "id": "b6d7c577-b708-44ad-839c-9743f85fcf7c" + }, + "entitlement": { + "features": [ + { + "name": "NGINX Controller Load Balancing", + "limit": 20, + "unitOfMeasure": "WORKLOADS", + "type": "PAID", + "id": "ADC", + "expiry": "2021-02-26T00:00:00.000Z", + "gracePeriodDays": 30 + } + ] + }, + "state": { + "currentInstance": { + "id": "8ce9b80a-f7fc-48fd-ac28-8d5f3fe898d6", + "type": "NGINX Controller", + "status": "INVALID", + "version": "3.3.0", + "telemetryLastReported": "2021-05-10T00:00:00Z", + "features": [ + { + "name": "NGINX Controller Load Balancing", + "used": 5, + "aggregateUsed": 10, + "remaining": 5, + "unitOfMeasure": "WORKLOADS", + "id": "ADC", + "daysUntilExpiry": 20, + "gracePeriodRemainingDays": 0 + } + ], + "configState": { + "selfConfigState": { + "isConfigured": false, + "isConfiguring": true, + "isError": false, + "isDeleting": false + }, + "conditions": [] + } + } + }, + "items": [ + { + "expiry": "2021-05-10T00:00:00Z", + "instanceCount": 250, + "product": "NGINX Controller Load Balancing", + "serial": 20145, + "type": "PAID", + "version": 1, + "id": "ADC" + } + ] + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + }, + "delete": { + "tags": [ + "License" + ], + "operationId": "deleteLicense", + "summary": "Delete 
a License", + "description": "Deletes an NGINX Controller License resource.", + "responses": { + "202": { + "description": "Successfully scheduled the request to delete the license.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LicenseResponse" + }, + "example": { + "metadata": { + "name": "license" + }, + "desiredState": { + "content": "******", + "items": [ + { + "expiry": "2021-05-10T00:00:00Z", + "instanceCount": 250, + "product": "NGINX Controller Load Balancing", + "serial": 20145, + "type": "PAID", + "version": 1, + "id": "ADC" + } + ] + }, + "currentStatus": { + "subscription": { + "id": "b6d7c577-b708-44ad-839c-9743f85fcf7c" + }, + "entitlement": { + "features": [ + { + "name": "NGINX Controller Load Balancing", + "limit": 20, + "unitOfMeasure": "WORKLOADS", + "type": "PAID", + "id": "ADC", + "expiry": "2021-02-26T00:00:00.000Z", + "gracePeriodDays": 30 + } + ] + }, + "state": { + "currentInstance": { + "id": "8ce9b80a-f7fc-48fd-ac28-8d5f3fe898d6", + "type": "NGINX Controller", + "status": "INVALID", + "version": "3.3.0", + "telemetryLastReported": "2021-05-10T00:00:00Z", + "features": [ + { + "name": "NGINX Controller Load Balancing", + "used": 5, + "aggregateUsed": 10, + "remaining": 5, + "unitOfMeasure": "WORKLOADS", + "id": "ADC", + "daysUntilExpiry": 20, + "gracePeriodRemainingDays": 0 + } + ], + "configState": { + "selfConfigState": { + "isConfigured": false, + "isConfiguring": false, + "isError": false, + "isDeleting": true + }, + "conditions": [] + } + } + }, + "items": [ + { + "expiry": "2021-05-10T00:00:00Z", + "instanceCount": 250, + "product": "NGINX Controller Load Balancing", + "serial": 20145, + "type": "PAID", + "version": 1, + "id": "ADC" + } + ] + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": 
"#/components/responses/Internal" + } + } + } + }, + "/platform/licenses/nginx-plus-licenses": { + "get": { + "tags": [ + "License" + ], + "summary": "List all NGINX+ Licenses.", + "description": "Returns a list of all NGINX Plus licenses.", + "operationId": "listNginxPlusLicenses", + "responses": { + "200": { + "description": "Successfully retrieved the list of NGINX Plus licenses.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NginxPlusLicensesList" + }, + "example": { + "items": [ + { + "metadata": { + "name": "controller-provided", + "ref": "/platform/licenses/nginx-plus-licenses/controller-provided", + "kind": "license" + }, + "currenStatus": { + "certKey": "a valid certificate key for NGINX Plus", + "privateKey": "a valid private key for NGINX Plus" + }, + "desiredState": { + "certKey": "a valid certificate key for NGINX Plus", + "privateKey": "a valid private key for NGINX Plus" + } + } + ] + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "402": { + "$ref": "#/components/responses/PaymentRequired" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + } + }, + "/platform/licenses/nginx-plus-licenses/{licenseName}": { + "get": { + "tags": [ + "License" + ], + "summary": "Get the NGINX Plus certificate and key bundle by name", + "description": "Gets the NGINX Plus certificate and key as a JSON or gzip file.", + "operationId": "getNginxPlusLicense", + "parameters": [ + { + "in": "header", + "name": "Content-Type", + "schema": { + "type": "string", + "example": "application/json" + } + }, + { + "name": "licenseName", + "description": "The name of the license.", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/LicenseName" + } + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the specified NGINX Plus license.", + "content": { + "application/json": { + 
"schema": { + "$ref": "#/components/schemas/NginxPlusLicenseResponse" + }, + "example": { + "metadata": { + "name": "controller-provided", + "ref": "/platform/licenses/nginx-plus-licenses/controller-provided", + "kind": "license" + }, + "currenStatus": { + "certKey": "a valid certificate key for NGINX Plus", + "privateKey": "a valid private key for NGINX Plus" + }, + "desiredState": { + "certKey": "a valid certificate key for NGINX Plus", + "privateKey": "a valid private key for NGINX Plus" + } + } + }, + "application/gzip": { + "schema": { + "type": "string", + "format": "binary" + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "402": { + "$ref": "#/components/responses/PaymentRequired" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "404": { + "$ref": "#/components/responses/FileNotFound" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + } + }, + "/platform/license-file": { + "post": { + "deprecated": true, + "tags": [ + "License" + ], + "summary": "Upload a License", + "description": "Uploads an NGINX Controller license.\n\nAccepts a single file (encoded as Base64) that may contain one or more product licenses.\n", + "operationId": "uploadLicense", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LicenseRequest" + } + } + } + }, + "responses": { + "200": { + "description": "Successfully uploaded a license.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/License" + }, + "example": { + "metadata": { + "name": "license" + }, + "desiredState": { + "items": [ + { + "expiry": "2021-05-10T00:00:00Z", + "instanceCount": 20, + "product": "NGINX Controller API Management", + "serial": 20145, + "type": "PRODUCTION", + "version": 1, + "id": "APIM" + } + ] + }, + "currentStatus": { + "items": [ + { + "expiry": "2021-05-10T00:00:00Z", + "instanceCount": 20, + "product": "NGINX Controller API Management", + 
"serial": 20145, + "type": "PRODUCTION", + "version": 1, + "id": "APIM" + } + ] + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + } + }, + "/platform/login": { + "post": { + "tags": [ + "Login" + ], + "summary": "Log in to NGINX Controller", + "description": "Logs in to the NGINX Controller platform and returns the user account's authentication cookie.\n\nThe session ID is returned in an authentication cookie named `session`. You must include this session ID in the request header for all subsequent requests to authenticate to the platform. For example:\n\n`curl -X GET --cookie 'session=' --header 'Content-type: application/json' 'https://192.0.2.10/api/v1/platform/global'`\n\n- The maximum session lifetime is 8 hours. Sessions are purged within 5 minutes of expiring. Once the session lifetime limit is exceeded, you must log in to obtain a fresh session token.\n- Sessions are purged immediately on logout.\n- Sessions are purged immediately if the user is removed.\n", + "operationId": "login", + "requestBody": { + "description": "A JSON object containing the type and the authentication information itself.\n- If type == 'BASIC', then the username and password are required.\n- If type == 'ACTIVE_DIRECTORY', then the authentication provider name, username, and password are required.\n- If type == 'AZURE_ACTIVE_DIRECTORY', then the authentication provider name, and type are required.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Login" + } + } + } + }, + "security": [], + "responses": { + "200": { + "description": "Received request to login with OIDC. 
Returns the Azure's authorization URL for SSO.\n" + }, + "204": { + "description": "Successfully authenticated.\nThe session ID is returned in an authentication cookie named `session`. You must include this session ID in the request header for all subsequent requests.\n", + "headers": { + "Set-Cookie": { + "schema": { + "type": "string", + "example": "session=.eJwlzjEOwzAIQNG7MGcA22DIZSKbgNI1aaaqd2-kLn96w; HttpOnly; Secure; Path=/" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "409": { + "$ref": "#/components/responses/StatusConflict" + }, + "500": { + "$ref": "#/components/responses/Internal" + }, + "503": { + "$ref": "#/components/responses/ServiceUnavailable" + } + } + }, + "get": { + "tags": [ + "Login" + ], + "summary": "Get Login Session", + "description": "Checks the validity of the current user's session. Returns the user's account info if the session is valid.", + "operationId": "sessionsStatus", + "responses": { + "200": { + "description": "Successfully validated the session ID and returned the associated user account info.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/User" + }, + "example": { + "currentStatus": { + "account": "1", + "authn": "amplify", + "email": "nobody@nodomain.com", + "firstName": "nobody", + "id": 777, + "lastLogin": 1580759750, + "lastName": "here", + "password": "********", + "roles": [ + { + "links": { + "rel": "/api/v1/platform/roles/guest", + "name": "guest" + }, + "ref": "/platform/roles/guest" + } + ], + "groups": [ + { + "links": { + "rel": "/api/v1/platform/auth/groups/guest", + "name": "guest" + }, + "ref": "/platform/groups/guest" + } + ] + }, + "desiredState": { + "email": "nobody@nodomain.com", + "firstName": "nobody", + "lastName": "here", + "password": "********", + "roles": [ + { + "ref": "/platform/roles/guest" + } + ] + }, + "metadata": { + "createTime": "2020-02-03T19:15:24.916809Z", + "kind": "user", + "name": "nobody@nodomain.com", + 
"updateTime": "2020-02-03T19:55:50.291147Z" + } + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "500": { + "$ref": "#/components/responses/Internal" + }, + "503": { + "$ref": "#/components/responses/ServiceUnavailable" + } + } + } + }, + "/platform/oidc-handler": { + "get": { + "tags": [ + "OIDC Handler" + ], + "summary": "OIDC redirect URL, that handles Login into controller using OIDC", + "description": "The endpoint that OIDC provider talks to after authenticating the user. It returns the user account's authentication cookie.\n\nThe session ID is returned in an authentication cookie named `session`. You must include this session ID in the request header for all subsequent requests to authenticate to the platform. For example:\n\n`curl -X GET --cookie 'session=' --header 'Content-type: application/json' 'https://192.0.2.10/api/v1/platform/global'`\n\n- The maximum session lifetime is 8 hours.\n- Sessions are purged immediately on logout.\n- Sessions are purged immediately if the user is removed or associated Auth provider is removed.\n Read more about this here https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-oauth2-auth-code-flow#successful-response\n", + "operationId": "oidc-handler", + "parameters": [ + { + "in": "query", + "name": "code", + "schema": { + "type": "string" + }, + "description": "The authorization_code that the app requested. The app exchanges the code to get an access token." + }, + { + "in": "query", + "name": "state", + "schema": { + "type": "string" + }, + "description": "State parameter is included if the app added it in the login request to the Identity provider. This helps prevent CSRF attacks." + }, + { + "in": "query", + "name": "error", + "schema": { + "type": "string" + }, + "description": "An error code string sent from the Identity provider when authentication did not go through as expected." 
+ }, + { + "in": "query", + "name": "error_description", + "schema": { + "type": "string" + }, + "description": "A specific error message that describes the root cause of the authentication failure." + } + ], + "security": [], + "responses": { + "302": { + "description": "Successfully authenticated and Sends Redirect back to the home page.\nThe session ID is returned in an authentication cookie named `session`. You must include this session ID in the request header for all subsequent requests.\n", + "headers": { + "Set-Cookie": { + "schema": { + "type": "string", + "example": "session=.eJwlzjEOwzAIQNG7MGcA22DIZSKbgNI1aaaqd2-kLn96w; HttpOnly; Secure; Path=/" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "503": { + "$ref": "#/components/responses/ServiceUnavailable" + } + } + } + }, + "/platform/logout": { + "post": { + "tags": [ + "Logout" + ], + "summary": "Log out of NGINX Controller", + "description": "Logs the user account out of the NGINX Controller platform and expires the authentication cookie.", + "operationId": "logout", + "responses": { + "204": { + "description": "The user was successfully logged out.", + "headers": { + "Set-Cookie": { + "schema": { + "type": "string", + "example": "session=; Expires=Jan 1 1970; Path=/" + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "500": { + "$ref": "#/components/responses/Internal" + }, + "503": { + "$ref": "#/components/responses/ServiceUnavailable" + } + } + } + }, + "/platform/login-providers": { + "get": { + "tags": [ + "Login Providers" + ], + "summary": "List all Active Authentication Providers", + "description": "Returns a list of all [authentication providers](#tag/Authentication-Providers) that are configured and ready for use.\n", + "operationId": "listReadyProviders", + "responses": { + "200": { + "description": "Successfully returned a list of the available authentication 
providers.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListReadyProviders" + }, + "example": { + "providers": [ + { + "type": "BASIC", + "name": "local" + } + ] + } + } + } + } + } + } + }, + "/platform/nodes": { + "get": { + "tags": [ + "Nodes" + ], + "summary": "List all Nodes", + "operationId": "getNodeList", + "description": "Gets information about the NGINX Controller control-plane nodes.", + "responses": { + "200": { + "description": "Successfully retrieved the NGINX Controller nodes.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NodeList" + }, + "example": { + "items": [ + { + "currentStatus": { + "hostname": "host-1", + "ip": "192.0.2.1", + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + }, + "desiredState": { + "hostname": "host-1", + "ip": "192.0.2.1" + }, + "metadata": { + "createTime": "2020-09-30T14:14:46Z", + "kind": "node", + "name": "host-1" + } + }, + { + "currentStatus": { + "hostname": "host-2", + "ip": "192.0.2.2", + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + }, + "desiredState": { + "hostname": "host-2", + "ip": "192.0.2.2" + }, + "metadata": { + "createTime": "2020-09-30T14:19:13.755911Z", + "kind": "node", + "name": "host-2" + } + }, + { + "currentStatus": { + "hostname": "host-3", + "ip": "192.0.2.3", + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 0, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + }, + "desiredState": { + "hostname": 
"host-3", + "ip": "192.0.2.3" + }, + "metadata": { + "createTime": "2020-09-30T14:19:16.155299Z", + "kind": "node", + "name": "host-3" + } + } + ] + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/InternalServerError" + } + } + }, + "post": { + "tags": [ + "Nodes" + ], + "summary": "Create a Node", + "operationId": "addNode", + "description": "Creates a new NGINX Controller control-plane node.\n\nAfter a node has been created, you must install NGINX Controller on the node to finish adding the node to the cluster.\nIn the JSON response, look for the `install.sh` command and join-key located under `currentStatus.state.conditions.message`.\nThe `type` is `install`. Copy and run this command with the join-key on the new node to finish the setup.\n\nNote: (Optional) If you're adding a deleted node back to the cluster, you must uninstall NGINX Controller from the node before \nrunning the `install.sh` command with the new join-key. 
To uninstall NGINX Controller, run the uninstall script on the node: \n`/opt/nginx-controller/uninstall.sh`.\n", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Node" + }, + "example": { + "desiredState": { + "hostname": "host-2", + "ip": "192.0.2.2" + }, + "metadata": { + "name": "host-2" + } + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the NGINX Controller node.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Node" + }, + "example": { + "currentStatus": { + "hostname": "host-2", + "ip": "192.0.2.2", + "state": { + "conditions": [ + { + "message": "install.sh --join-key ", + "type": "install" + } + ], + "selfConfigState": { + "configured": 0, + "configuring": 1, + "deleting": 0, + "error": 0, + "isConfigured": false, + "isConfiguring": true, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + }, + "desiredState": { + "hostname": "host-2", + "ip": "192.0.2.2" + }, + "metadata": { + "createTime": "2020-10-01T10:03:39.228626Z", + "kind": "node", + "name": "host-2" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "409": { + "$ref": "#/components/responses/Conflict" + }, + "500": { + "$ref": "#/components/responses/InternalServerError" + }, + "501": { + "$ref": "#/components/responses/NotImplemented" + } + } + } + }, + "/platform/nodes/{nodeName}": { + "get": { + "tags": [ + "Nodes" + ], + "summary": "Get a Node", + "operationId": "getNode", + "description": "Gets information about an NGINX Controller control-plane node by name.", + "parameters": [ + { + "$ref": "#/components/parameters/nodeName" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the specified NGINX Controller node.", + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/Node" + }, + "example": { + "currentStatus": { + "hostname": "host-2", + "ip": "192.0.2.2", + "state": { + "conditions": [ + { + "message": "install.sh --join-key ", + "type": "install" + } + ], + "selfConfigState": { + "configured": 0, + "configuring": 1, + "deleting": 0, + "error": 0, + "isConfigured": false, + "isConfiguring": true, + "isDeleting": false, + "isError": false, + "total": 1 + } + } + }, + "desiredState": { + "hostname": "host-2", + "ip": "192.0.2.2" + }, + "metadata": { + "createTime": "2020-10-01T10:03:39.228626Z", + "kind": "node", + "name": "host-2" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "404": { + "$ref": "#/components/responses/NotFound" + }, + "500": { + "$ref": "#/components/responses/InternalServerError" + } + } + }, + "delete": { + "tags": [ + "Nodes" + ], + "summary": "Delete a Node", + "operationId": "deleteNode", + "description": "Deletes an NGINX Controller control-plane node by name.\nThe node's status is `isDeleting` while the workloads\nare being drained from it. Once draining is complete the node is deleted.\n\nNote: (Optional) You must uninstall NGINX Controller from the deleted node before you can add the node back to the cluster. 
\nTo uninstall NGINX Controller, run the uninstall script on the node: `/opt/nginx-controller/uninstall.sh`.\n", + "parameters": [ + { + "$ref": "#/components/parameters/nodeName" + } + ], + "responses": { + "202": { + "description": "Successfully scheduled the node to be deleted.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Node" + }, + "example": { + "currentStatus": { + "hostname": "host-2", + "ip": "192.0.2.2", + "state": { + "conditions": [], + "selfConfigState": { + "configured": 1, + "configuring": 0, + "deleting": 1, + "error": 0, + "isConfigured": true, + "isConfiguring": false, + "isDeleting": true, + "isError": false, + "total": 1 + } + } + }, + "desiredState": { + "hostname": "host-2", + "ip": "192.0.2.2" + }, + "metadata": { + "createTime": "2020-10-01T10:03:39.228626Z", + "kind": "node", + "name": "host-2" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "404": { + "$ref": "#/components/responses/NotFound" + }, + "500": { + "$ref": "#/components/responses/InternalServerError" + }, + "501": { + "$ref": "#/components/responses/NotImplemented" + } + } + } + }, + "/platform/roles": { + "get": { + "tags": [ + "Roles" + ], + "summary": "List Roles", + "description": "Returns an unfiltered list of all Role resources.", + "operationId": "getRoles", + "responses": { + "200": { + "description": "Successfully retrieved a list of all of the Role resources.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RoleListResponse" + }, + "example": { + "items": [ + { + "currentStatus": { + "permissions": [ + { + "access": "FULL", + "path": "/" + } + ] + }, + "desiredState": { + "permissions": [ + { + "access": "FULL", + "path": "/" + } + ] + }, + "metadata": { + "createTime": "2020-02-24T21:29:59.866708Z", + "kind": "role", + "name": 
"admin" + } + } + ] + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + }, + "post": { + "tags": [ + "Roles" + ], + "summary": "Create a Role", + "description": "Creates a new Role resource.", + "operationId": "createRole", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Role" + }, + "example": { + "desiredState": { + "permissions": [ + { + "access": "READ", + "path": "/services/environments/dev/" + }, + { + "access": "WRITE", + "path": "/services/environments/test/" + } + ] + }, + "metadata": { + "kind": "role", + "name": "role1", + "tags": [ + "dev", + "test" + ] + } + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the requested Role resource.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Role" + }, + "example": { + "currentStatus": { + "permissions": [ + { + "access": "READ", + "path": "/services/environments/dev/" + }, + { + "access": "WRITE", + "path": "/services/environments/test/" + } + ] + }, + "desiredState": { + "permissions": [ + { + "access": "READ", + "path": "/services/environments/dev/" + }, + { + "access": "WRITE", + "path": "/services/environments/test/" + } + ] + }, + "metadata": { + "createTime": "2020-02-24T22:55:51.729272Z", + "kind": "role", + "name": "role1", + "tags": [ + "dev", + "test" + ] + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "409": { + "$ref": "#/components/responses/Conflict" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + } + }, + "/platform/roles/{roleName}": { + "get": { + "tags": [ + "Roles" + ], + "summary": "Get a Role", + "description": "Returns 
information about a specific Role resource by its name.\n", + "operationId": "getRoleByName", + "parameters": [ + { + "$ref": "#/components/parameters/roleName" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the requested Role resource.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Role" + }, + "example": { + "currentStatus": { + "permissions": [ + { + "access": "READ", + "path": "/services/environments/test/" + }, + { + "access": "WRITE", + "path": "/services/environments/dev/" + } + ] + }, + "desiredState": { + "permissions": [ + { + "access": "READ", + "path": "/services/environments/test/" + }, + { + "access": "WRITE", + "path": "/services/environments/dev/" + } + ] + }, + "metadata": { + "createTime": "2020-02-24T22:38:33.842929Z", + "kind": "role", + "name": "role1", + "tags": [ + "dev", + "test" + ] + } + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "404": { + "$ref": "#/components/responses/NotFound" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + }, + "put": { + "tags": [ + "Roles" + ], + "summary": "Upsert a Role", + "description": "Creates a new Role resource or updates an existing Role resource.\n", + "operationId": "upsertRole", + "parameters": [ + { + "$ref": "#/components/parameters/roleName" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Role" + }, + "example": { + "desiredState": { + "permissions": [ + { + "access": "WRITE", + "path": "/services/environments/dev/" + } + ] + }, + "metadata": { + "kind": "role", + "name": "role1", + "tags": [ + "dev", + "test" + ] + } + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified Role resource.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Role" + }, + "example": { + 
"currentStatus": { + "permissions": [ + { + "access": "WRITE", + "path": "/services/environments/dev/" + } + ] + }, + "desiredState": { + "permissions": [ + { + "access": "WRITE", + "path": "/services/environments/dev/" + } + ] + }, + "metadata": { + "createTime": "2020-02-24T22:38:33.842929Z", + "kind": "role", + "name": "role1", + "updateTime": "2020-02-24T22:53:14.340686Z", + "tags": [ + "dev", + "test" + ] + } + } + } + } + }, + "201": { + "description": "Successfully created the specified Role resource.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Role" + }, + "example": { + "currentStatus": { + "permissions": [ + { + "access": "WRITE", + "path": "/services/environments/dev/" + } + ] + }, + "desiredState": { + "permissions": [ + { + "access": "WRITE", + "path": "/services/environments/dev/" + } + ] + }, + "metadata": { + "createTime": "2020-02-24T22:38:33.842929Z", + "kind": "role", + "name": "role1", + "updateTime": "2020-02-24T22:53:14.340686Z", + "tags": [ + "dev", + "test" + ] + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + }, + "delete": { + "tags": [ + "Roles" + ], + "summary": "Delete a Role", + "description": "Deletes a specific Role resource.", + "operationId": "deleteRole", + "parameters": [ + { + "$ref": "#/components/parameters/roleName" + } + ], + "responses": { + "204": { + "description": "Successfully deleted the role. No content was returned." 
+ }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "404": { + "$ref": "#/components/responses/NotFound" + }, + "409": { + "$ref": "#/components/responses/Conflict" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + } + }, + "/platform/users": { + "get": { + "tags": [ + "Users" + ], + "summary": "List all Users", + "operationId": "listUsers", + "description": "Returns an unfiltered list of all User resources.", + "responses": { + "200": { + "description": "Successfully retrieved a list of User resources.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UserListResponse" + }, + "example": { + "items": [ + { + "metadata": { + "name": "john.doe@example.com", + "kind": "user", + "createTime": "2019-07-29T09:12:33.001Z", + "displayName": "John Doe", + "description": "NGINX controller user with prod env role" + }, + "desiredState": { + "firstName": "John", + "lastName": "Doe", + "email": "john.doe@example.com", + "password": "********", + "roles": [ + { + "ref": "/platform/roles/prod" + } + ] + }, + "currentStatus": { + "account": "1", + "id": 123, + "firstName": "John", + "lastName": "Doe", + "lastLogin": 1570132969, + "email": "john.doe@example.com", + "authn": "amplify", + "password": "********", + "roles": [ + { + "ref": "/platform/roles/prod", + "links": { + "rel": "/api/v1/platform/roles/prod", + "name": "prod" + } + } + ] + } + } + ] + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/InternalServerError" + } + } + }, + "post": { + "tags": [ + "Users" + ], + "summary": "Create a User", + "operationId": "addUser", + "description": "Creates a new User resource.", + "requestBody": { + "description": "Defines the details to use for the new 
User resource.", + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/User" + }, + "example": { + "metadata": { + "name": "john.doe@example.com", + "displayName": "John Doe", + "description": "NGINX controller user with prod env role" + }, + "desiredState": { + "firstName": "John", + "lastName": "Doe", + "email": "john.doe@example.com", + "password": "*******", + "roles": [ + { + "ref": "/platform/roles/prod" + } + ], + "groups": [ + { + "ref": "/platform/groups/guest" + } + ] + } + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the requested User resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/User" + }, + "example": { + "metadata": { + "name": "john.doe@example.com", + "kind": "user", + "createTime": "2019-07-29T09:12:33.001Z", + "displayName": "John Doe", + "description": "NGINX controller user with prod env role" + }, + "desiredState": { + "firstName": "John", + "lastName": "Doe", + "email": "john.doe@example.com", + "password": "********", + "roles": [ + { + "ref": "/platform/roles/prod" + } + ], + "groups": [ + { + "ref": "/platform/groups/guest" + } + ] + }, + "currentStatus": { + "account": "1", + "id": 123, + "firstName": "John", + "lastName": "Doe", + "lastLogin": 1570132969, + "email": "john.doe@example.com", + "authn": "amplify", + "password": "********", + "roles": [ + { + "ref": "/platform/roles/prod", + "links": { + "rel": "/api/v1/platform/roles/prod", + "name": "prod" + } + } + ], + "groups": [ + { + "links": { + "rel": "/api/v1/platform/auth/groups/guest", + "name": "guest" + }, + "ref": "/platform/groups/guest" + } + ] + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "409": { + "$ref": "#/components/responses/Conflict" + }, + "500": { + "$ref": 
"#/components/responses/InternalServerError" + } + } + } + }, + "/platform/users/{userName}": { + "get": { + "tags": [ + "Users" + ], + "summary": "Get a User", + "operationId": "getUser", + "description": "Gets information for an existing User resource.", + "parameters": [ + { + "$ref": "#/components/parameters/userName" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the requested User resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/User" + }, + "example": { + "metadata": { + "name": "john.doe@example.com", + "kind": "user", + "createTime": "2019-07-29T09:12:33.001Z", + "displayName": "John Doe", + "description": "NGINX controller user with prod env role" + }, + "desiredState": { + "firstName": "John", + "lastName": "Doe", + "email": "john.doe@example.com", + "password": "********", + "roles": [ + { + "ref": "/platform/roles/prod" + } + ] + }, + "currentStatus": { + "account": "1", + "id": 123, + "firstName": "John", + "lastName": "Doe", + "lastLogin": 1570132969, + "email": "john.doe@example.com", + "authn": "amplify", + "password": "********", + "roles": [ + { + "ref": "/platform/roles/prod", + "links": { + "rel": "/api/v1/platform/roles/prod", + "name": "prod" + } + } + ] + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "404": { + "$ref": "#/components/responses/NotFound" + }, + "500": { + "$ref": "#/components/responses/InternalServerError" + } + } + }, + "patch": { + "tags": [ + "Users" + ], + "summary": "Update a User", + "operationId": "updateUser", + "description": "Updates an existing User resource.", + "parameters": [ + { + "$ref": "#/components/parameters/userName" + } + ], + "requestBody": { + "description": "Defines the updates to make to the specified User resource.", + "required": true, + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateUser" + }, + "example": { + "metadata": { + "name": "john.doe@example.com", + "displayName": "Jane Doe", + "description": "NGINX controller user with prod env role" + }, + "desiredState": { + "firstName": "Jane", + "lastName": "Doe", + "password": "********", + "verifyPassword": "********", + "roles": [ + { + "ref": "/platform/roles/new" + } + ] + } + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified User resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateUser" + }, + "example": { + "metadata": { + "name": "john.doe@example.com", + "kind": "user", + "createTime": "2019-07-29T09:12:33.001Z", + "displayName": "Jane Doe", + "description": "NGINX controller user with prod env role" + }, + "desiredState": { + "firstName": "Jane", + "lastName": "Doe", + "password": "********", + "roles": [ + { + "ref": "/platform/roles/new" + } + ] + }, + "currentStatus": { + "account": "1", + "id": 123, + "firstName": "Jane", + "lastName": "Doe", + "lastLogin": 1570132969, + "email": "john.doe@example.com", + "authn": "amplify", + "password": "********", + "roles": [ + { + "ref": "/platform/roles/new", + "links": { + "rel": "/api/v1/platform/roles/new", + "name": "new" + } + } + ] + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/InternalServerError" + } + } + }, + "delete": { + "tags": [ + "Users" + ], + "summary": "Delete a User", + "operationId": "deleteUser", + "description": "Deletes a User resource.", + "parameters": [ + { + "$ref": "#/components/parameters/userName" + } + ], + "responses": { + "204": { + "description": "Successfully deleted the user. No content was returned." 
+ }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "404": { + "$ref": "#/components/responses/NotFound" + }, + "500": { + "$ref": "#/components/responses/InternalServerError" + } + } + } + }, + "/platform/integrations": { + "get": { + "tags": [ + "Integrations" + ], + "summary": "List all Integrations", + "description": "Returns an unfiltered list of integration accounts.", + "operationId": "listIntegrations", + "responses": { + "200": { + "description": "Successfully retrieved a list of integration accounts.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListIntegrationResponse" + }, + "examples": { + "INTEGRATIONS": { + "$ref": "#/components/examples/ListIntegrationResponse" + } + } + } + } + } + } + }, + "post": { + "tags": [ + "Integrations" + ], + "summary": "Create an Integration account", + "description": "Creates a new integration account.", + "operationId": "addIntegration", + "requestBody": { + "description": "Defines the integration account to be added.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Integration" + }, + "examples": { + "AWS": { + "$ref": "#/components/examples/AWSRequest" + }, + "Splunk": { + "$ref": "#/components/examples/SplunkRequest" + } + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the requested integration.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetIntegrationResponse" + }, + "examples": { + "AWS": { + "$ref": "#/components/examples/AWSResponse" + }, + "Splunk": { + "$ref": "#/components/examples/SplunkResponse" + } + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. 
Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "examples": { + "BadReq": { + "$ref": "#/components/examples/BadRequestError" + } + } + } + } + }, + "409": { + "description": "The request failed due to a conflict with an existing integration.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "examples": { + "AlreadyExists": { + "$ref": "#/components/examples/AlreadyExistsError" + } + } + } + } + } + } + } + }, + "/platform/integrations/{integrationName}": { + "get": { + "tags": [ + "Integrations" + ], + "summary": "Get an Integration account", + "description": "Gets information about a specific integration account.", + "operationId": "getIntegration", + "parameters": [ + { + "$ref": "#/components/parameters/IntegrationName" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the requested integration account.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetIntegrationResponse" + }, + "examples": { + "AWS": { + "$ref": "#/components/examples/AWSResponse" + } + } + } + } + }, + "404": { + "description": "Integration resource not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "examples": { + "NotFound": { + "$ref": "#/components/examples/NotFoundError" + } + } + } + } + } + } + }, + "put": { + "tags": [ + "Integrations" + ], + "summary": "Upsert an Integration account", + "description": "Creates a new integration account or updates an existing integration account.", + "operationId": "upsertIntegration", + "parameters": [ + { + "$ref": "#/components/parameters/IntegrationName" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Integration" + }, + "examples": { + "AWS": { + "$ref": "#/components/examples/AWSRequest" + }, + 
"Splunk": { + "$ref": "#/components/examples/SplunkRequest" + } + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successfully received the request to update the specified integration account.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetIntegrationResponse" + }, + "examples": { + "AWS": { + "$ref": "#/components/examples/AWSResponse" + }, + "Splunk": { + "$ref": "#/components/examples/SplunkResponse" + } + } + } + } + }, + "201": { + "description": "Successfully updated the specified integration account.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetIntegrationResponse" + }, + "examples": { + "AWS": { + "$ref": "#/components/examples/AWSResponse" + }, + "Splunk": { + "$ref": "#/components/examples/SplunkResponse" + } + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "examples": { + "BadReq": { + "$ref": "#/components/examples/BadRequestError" + } + } + } + } + } + } + }, + "patch": { + "tags": [ + "Integrations" + ], + "summary": "Update an Integration account", + "description": "Updates an existing integration account.", + "operationId": "updateIntegration", + "parameters": [ + { + "$ref": "#/components/parameters/IntegrationName" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateIntegration" + }, + "examples": { + "AWS": { + "$ref": "#/components/examples/AWSRequestForPatch" + }, + "Splunk": { + "$ref": "#/components/examples/SplunkRequestForPatch" + } + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successfully received the request to update the specified integration account.", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/UpdateIntegration" + }, + "examples": { + "AWS": { + "$ref": "#/components/examples/AWSResponseForPatch" + }, + "Splunk": { + "$ref": "#/components/examples/SplunkResponseForPatch" + } + } + } + } + }, + "400": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "examples": { + "BadReq": { + "$ref": "#/components/examples/BadRequestError" + } + } + } + } + }, + "404": { + "description": "Integration resource not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "examples": { + "NotFound": { + "$ref": "#/components/examples/NotFoundError" + } + } + } + } + } + } + }, + "delete": { + "tags": [ + "Integrations" + ], + "summary": "Delete an Integration account", + "description": "Deletes the specified integration account.", + "operationId": "deleteIntegration", + "parameters": [ + { + "$ref": "#/components/parameters/IntegrationName" + } + ], + "responses": { + "204": { + "description": "Successfully deleted the specified integration account." + }, + "404": { + "description": "Integration not found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "examples": { + "NotFound": { + "$ref": "#/components/examples/NotFoundError" + } + } + } + } + } + } + } + }, + "/platform/auth/groups": { + "get": { + "tags": [ + "Groups" + ], + "summary": "List Authentication Groups", + "description": "Returns an unfiltered list of all of the authentication groups.\n\nAn authentication group (or \"AuthN Group\") is a collection of Roles. All AuthN Groups are globally unique. 
You can assign AuthN Groups to external authentication provider properties (such as AD Groups).\n", + "operationId": "getGroups", + "responses": { + "200": { + "description": "Successfully retrieved a list of all authentication groups.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GroupListResponse" + }, + "example": { + "items": [ + { + "currentStatus": { + "roles": [ + { + "ref": "/platform/roles/admin", + "links": { + "rel": "/api/v1/platform/roles/admin", + "name": "admin", + "displayName": "Admin Role" + } + } + ] + }, + "desiredState": { + "roles": [ + { + "ref": "/platform/roles/user" + } + ] + }, + "metadata": { + "createTime": "2020-02-24T21:29:59.866708Z", + "kind": "group", + "name": "us-group-1" + } + } + ] + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + }, + "post": { + "tags": [ + "Groups" + ], + "summary": "Add an Authentication Group", + "description": "Creates a new authentication group.", + "operationId": "createGroup", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Group" + }, + "example": { + "desiredState": { + "roles": [ + { + "ref": "/platform/roles/user" + }, + { + "ref": "/platform/roles/custom-1" + } + ] + }, + "metadata": { + "kind": "group", + "name": "group-1" + } + } + } + } + }, + "responses": { + "201": { + "description": "Successfully created the requested authentication group.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Group" + }, + "example": { + "currentStatus": { + "roles": [ + { + "ref": "/platform/roles/user", + "links": { + "rel": "/api/v1/platform/roles/user", + "name": "user", + "displayName": "User Role" + } + }, + { + "ref": "/platform/roles/custom-1", + "links": { + "rel": "/api/v1/platform/roles/custom-1", + "name": 
"custom-1", + "displayName": "First Custom Role" + } + } + ] + }, + "desiredState": { + "roles": [ + { + "ref": "/platform/roles/user" + }, + { + "ref": "/platform/roles/custom-1" + } + ] + }, + "metadata": { + "createTime": "2020-02-24T22:55:51.729272Z", + "kind": "group", + "name": "group-1" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "409": { + "$ref": "#/components/responses/Conflict" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + } + }, + "/platform/auth/groups/{groupName}": { + "get": { + "tags": [ + "Groups" + ], + "summary": "Get an Authentication Group", + "description": "Returns information about a specific Authentication Group resource.\n", + "operationId": "getGroupByName", + "parameters": [ + { + "$ref": "#/components/parameters/groupName" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the requested Authentication Group resource.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Group" + }, + "example": { + "currentStatus": { + "roles": [ + { + "ref": "/platform/roles/user", + "links": { + "rel": "/api/v1/platform/roles/user", + "name": "user", + "displayName": "User Role" + } + }, + { + "ref": "/platform/roles/custom-1", + "links": { + "rel": "/api/v1/platform/roles/custom-1", + "name": "custom-1", + "displayName": "First Custom Role" + } + } + ] + }, + "desiredState": { + "roles": [ + { + "ref": "/platform/roles/user" + }, + { + "ref": "/platform/roles/custom-1" + } + ] + }, + "metadata": { + "createTime": "2020-02-24T22:38:33.842929Z", + "kind": "group", + "name": "group-1" + } + } + } + } + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "404": { + "$ref": "#/components/responses/NotFound" + }, + "500": { + 
"$ref": "#/components/responses/Internal" + } + } + }, + "put": { + "tags": [ + "Groups" + ], + "summary": "Upsert an Authentication Group", + "description": "Creates a new Authentication Group resource or updates an existing Authentication Group.\n", + "operationId": "upsertGroup", + "parameters": [ + { + "$ref": "#/components/parameters/groupName" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Group" + }, + "example": { + "desiredState": { + "roles": [ + { + "ref": "/platform/roles/user" + }, + { + "ref": "/platform/roles/custom-1" + } + ] + }, + "metadata": { + "kind": "group", + "name": "group-1" + } + } + } + } + }, + "responses": { + "200": { + "description": "Successfully updated the specified group.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Group" + }, + "example": { + "currentStatus": { + "roles": [ + { + "ref": "/platform/roles/user", + "links": { + "rel": "/api/v1/platform/roles/user", + "name": "user", + "displayName": "User Role" + } + }, + { + "ref": "/platform/roles/custom-1", + "links": { + "rel": "/api/v1/platform/roles/custom-1", + "name": "custom-1", + "displayName": "First Custom Role" + } + } + ] + }, + "desiredState": { + "roles": [ + { + "ref": "/platform/roles/user" + }, + { + "ref": "/platform/roles/custom-1" + } + ] + }, + "metadata": { + "createTime": "2020-02-24T22:38:33.842929Z", + "kind": "group", + "name": "group-1", + "updateTime": "2020-02-24T22:53:14.340686Z" + } + } + } + } + }, + "201": { + "description": "Successfully created the specified group.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Group" + }, + "example": { + "currentStatus": { + "roles": [ + { + "ref": "/platform/roles/user", + "links": { + "rel": "/api/v1/platform/roles/user", + "name": "user", + "displayName": "User Role" + } + }, + { + "ref": "/platform/roles/custom-1", + "links": { + "rel": 
"/api/v1/platform/roles/custom-1", + "name": "custom-1", + "displayName": "First Custom Role" + } + } + ] + }, + "desiredState": { + "roles": [ + { + "ref": "/platform/roles/user" + }, + { + "ref": "/platform/roles/custom-1" + } + ] + }, + "metadata": { + "createTime": "2020-02-24T22:38:33.842929Z", + "kind": "group", + "name": "group-1", + "updateTime": "2020-02-24T22:53:14.340686Z" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest" + }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + }, + "delete": { + "tags": [ + "Groups" + ], + "summary": "Delete an Authentication Group", + "description": "Deletes a specific authentication group.", + "operationId": "deleteGroup", + "parameters": [ + { + "$ref": "#/components/parameters/groupName" + } + ], + "responses": { + "204": { + "description": "Succesfully deleted the authentication group. No content was returned." 
+ }, + "401": { + "$ref": "#/components/responses/Unauthorized" + }, + "403": { + "$ref": "#/components/responses/Forbidden" + }, + "404": { + "$ref": "#/components/responses/NotFound" + }, + "409": { + "$ref": "#/components/responses/Conflict" + }, + "500": { + "$ref": "#/components/responses/Internal" + } + } + } + } + }, + "components": { + "securitySchemes": { + "cookieAuth": { + "type": "apiKey", + "in": "cookie", + "name": "session" + } + }, + "parameters": { + "nodeName": { + "in": "path", + "name": "nodeName", + "description": "The name of the node.", + "schema": { + "type": "string" + }, + "required": true + }, + "roleName": { + "in": "path", + "name": "roleName", + "schema": { + "type": "string" + }, + "required": true, + "description": "The name of the Role.\n" + }, + "userName": { + "in": "path", + "name": "userName", + "schema": { + "type": "string", + "format": "email" + }, + "required": true, + "description": "The name for the User resource.\n" + }, + "code": { + "in": "path", + "name": "code", + "schema": { + "type": "string", + "minLength": 1 + }, + "required": true, + "description": "The password recovery code.\n" + }, + "IntegrationName": { + "name": "integrationName", + "in": "path", + "description": "The name of the integration account.", + "required": true, + "schema": { + "type": "string" + } + }, + "groupName": { + "in": "path", + "name": "groupName", + "schema": { + "type": "string" + }, + "required": true, + "description": "The name of the group.\n" + } + }, + "responses": { + "BadRequest": { + "description": "Bad input parameter, or possibly a bad URI. Check the input for typos and try again.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "code": 4005, + "message": "Error verifying the authorization: could not decode the request. Check the request body and try again." 
+ } + } + } + }, + "Unauthorized": { + "description": "User authentication is invalid or missing.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "code": 401, + "message": "authentication needed" + } + } + } + }, + "Forbidden": { + "description": "The request failed because of insufficient privileges.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "code": 403, + "message": "unauthorized" + } + } + } + }, + "Internal": { + "description": "The request cannot be processed because of an internal server error.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "code": 4000, + "message": "An internal error occurred while verifying the authorization. If the problem persists, contact the system administrator." + } + } + } + }, + "InternalServerError": { + "description": "The request cannot be processed because of an internal server error.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "Conflict": { + "description": "The request failed due to a conflict with an existing Node resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "NotImplemented": { + "description": "The request method is not supported by the server in the current configuration and cannot be handled.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + } + } + } + }, + "ServiceUnavailable": { + "description": "The request cannot be processed because service is unavailable. Please try resending the request. \n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "message": "Error retrieving the user information. 
Try resending the request.", + "code": 2351 + } + } + } + }, + "StatusConflict": { + "description": "Wrong username and/or password.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "message": "Error logging in: incorrect username or password. Check the login credentials, then try resending the request.", + "code": 2379 + } + } + } + }, + "NotFound": { + "description": "The specified authentication group was not found.\n", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "code": 8920, + "message": "Error getting or deleting group: group not found." + } + } + } + }, + "PaymentRequired": { + "description": "The request failed due to missing or expired license.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "code": 4700, + "details": [ + { + "description": "Error getting the NGINX Plus certificate and public key: the license is either expired or no license exists. Upload a valid license file and then try again." + } + ], + "message": "payment required." + } + } + } + }, + "FileNotFound": { + "description": "The requested certificate and key could not be found.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorModel" + }, + "example": { + "code": 4600, + "details": [ + { + "description": "The requested resource was not found or is unavailable." + } + ], + "message": "The requested NGINX Plus license is not found or is unavailable." 
+ } + } + } + } + }, + "schemas": { + "VerifyList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AuthRequest" + }, + "description": "Contains a list of authentication requests.\n" + } + } + }, + "AuthRequest": { + "type": "object", + "required": [ + "path", + "method" + ], + "example": { + "items": [ + { + "path": "/services/environments/test1", + "method": "DELETE", + "permitted": false + }, + { + "path": "/services/environments/test2", + "method": "PUT", + "permitted": true + } + ] + }, + "properties": { + "path": { + "type": "string" + }, + "method": { + "type": "string", + "enum": [ + "GET", + "POST", + "PUT", + "DELETE", + "HEAD", + "TRACE", + "PATCH", + "CONNECT", + "OPTIONS" + ] + }, + "permitted": { + "type": "boolean" + } + } + }, + "ErrorDetail": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string", + "example": "Error doing : . This can lead to . Try to resolve the issue.", + "description": "A detailed error message returned by the server. 
\n\nThese messages contain the following information, where applicable:\n\n- What happened.\n- Why it happened.\n- What the consequences are (if any).\n- Recommended action to take to resolve the issue.\n" + } + } + }, + "ErrorModel": { + "type": "object", + "required": [ + "message", + "code" + ], + "properties": { + "message": { + "type": "string", + "example": "Error doing .", + "description": "A human-readable message, in English, that describes the error.\n" + }, + "code": { + "type": "integer", + "example": 1234567, + "description": "A numeric error code that can be used to identify errors for support purposes.\n" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ErrorDetail" + } + } + } + }, + "NodeList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Node" + } + } + } + }, + "Node": { + "type": "object", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "currentStatus": { + "allOf": [ + { + "properties": { + "state": { + "$ref": "#/components/schemas/ConfigState" + } + } + }, + { + "$ref": "#/components/schemas/NodeDef" + } + ] + }, + "desiredState": { + "$ref": "#/components/schemas/NodeDef" + } + } + }, + "NodeDef": { + "type": "object", + "properties": { + "hostname": { + "type": "string", + "format": "hostname", + "description": "The hostname for the NGINX Controller control-plane node. When adding a node, you must provide a hostname or an IP address, or you can specify both. The hostname must be the real hostname, which you can look up by running the `hostname` command." + }, + "ip": { + "type": "string", + "format": "ipv4", + "example": "192.0.2.0", + "description": "The IP address for the NGINX Controller control-plane node. When adding a node, you must provide an IP address or a hostname, or you can specify both." 
+ } + } + }, + "SelfLinks": { + "type": "object", + "description": "The SelfLinks object contains a link from the resource to itself.\nThis object is used only in responses.\n", + "properties": { + "rel": { + "type": "string", + "example": "/api/v1/services/environments/prod", + "description": "`rel` contains the complete path fragment of a URI and can be used\nto construct a query to the object.\n" + } + } + }, + "ResourceMeta": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "pattern": "^[^A-Z\\s\\x00-\\x1f\\x60\\x7f\\;\\*\\\"\\[\\]\\{\\}\\\\\\/%\\?:=&\\~\\^|#<>]+$", + "not": { + "type": "string", + "enum": [ + ".", + ".." + ] + }, + "minLength": 1, + "maxLength": 1024, + "example": "resource-name", + "description": "Resource name is a unique identifier for a resource within the context of a namespace.\nResource names must conform to [RFC 1738 Section 2.2](https://www.ietf.org/rfc/rfc1738.txt) and have a valid syntax for email addresses. The following rules are enforced:\n\n- do not utilize URL encoding;\n- do not include spaces;\n- do not use uppercase characters, for example, 'A-Z'; extended character sets are supported;\n- do not use the following characters: `\"`, `*`, `:`, `;`, `/`, `\\`, `%`, `?`, `hash`, `=`, `&`, `|`, `~`, `^`, `{`, `}`, `[`, `]`, `<`, `>`;\n- cannot start or end with an `@` sign;\n- cannot be only `.` or `..`\n\nFor example: For a collection resource located at\n\n`https://controller.example.com/api/v1/services/apps/shopping_@1`\n\nthe resource name is \"shopping_@1\".\n" + }, + "displayName": { + "type": "string", + "example": "My Display Name", + "description": "`displayName` is a user friendly resource name. 
It can be used to define \na longer, and less constrained, name for a resource.\n\nDisplay names:\n\n- are optional (defaults to an empty string if no value is provided),\n- do not have to be unique, \n- cannot be assigned by the server.\n" + }, + "description": { + "type": "string", + "example": "This is a sample description string. It provides information about the resource.", + "description": "`description` is a free-form text property. You can use it to provide information that helps \nto identify the resource.\n\nDescriptions:\n\n- are optional (defaults to an empty string if no value is provided),\n- do not have to be unique, \n- cannot be assigned by the server.\n" + }, + "kind": { + "type": "string", + "example": "-", + "description": "Kind is a string representation of an API resource's data type.\nIt is assigned by the server and cannot be changed. \n\nWhen creating a `kind`, the server uses hyphens to connect word segments; \nsingleton and collection item resources are not pluralized.\n" + }, + "uid": { + "type": "string", + "format": "uuid", + "example": "d290f1ee-6c54-4b01-90e6-d701748f0851", + "description": "Unique Identifier (UID)\n\nUID is a unique identifier in time and space for a resource. \nWhen you create a resource, the server assigns a UID to the resource.\n\nRefer to [IETF RFC 4122](https://tools.ietf.org/html/rfc4122) for more information.\n" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "production_public", + "dev", + "new_app", + "us-west-1", + "emea" + ], + "description": "You can assign `tags` to a resource as a way to help map, scope, \nand organize resources. 
\n\nThe system uses tag selectors to specify selection criteria that \nmatch resources that have particular tags.\n" + }, + "ref": { + "type": "string", + "example": "/services/environments/prod", + "description": "The `ref` field contains a reference to another NGINX Controller resource.\n" + }, + "links": { + "$ref": "#/components/schemas/SelfLinks" + }, + "createTime": { + "type": "string", + "format": "date-time", + "example": "2019-07-29T09:12:33.001Z", + "description": "A timestamp that represents the server time when the resource was created.\n\nCreate time is not guaranteed to be set in \"happens-before\" order\nacross separate operations.\n\nIn JSON format, `create_time` type is encoded as a string in the\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt).\n\nFor example: 2018-04-01T01:30:15.01Z\n\nCreate Time is assigned by the server and cannot be changed.\n" + }, + "updateTime": { + "type": "string", + "format": "date-time", + "example": "2019-07-29T10:12:33.001Z", + "description": "A timestamp that represents the server time when the resource was last modified.\n\nResources that have never been updated do not have an `update_time` stamp.\n\nThe default value for resources that have never been updated is the local \nlanguage-specific equivalent of \"null\".\n\nIn JSON format, `update_time` type is encoded as a string as described in \n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt).\n" + } + } + }, + "ConfigStateTally": { + "type": "object", + "properties": { + "isConfigured": { + "type": "boolean", + "description": "The configuration operation is complete." + }, + "isConfiguring": { + "type": "boolean", + "description": "The configuration of the resource, or of its child(ren), is in process." + }, + "isError": { + "type": "boolean", + "description": "An error occurred while configuring the resource or its child(ren)." + }, + "isDeleting": { + "type": "boolean", + "description": "A delete operation is in progress for the resource or its child(ren)." 
+ }, + "total": { + "type": "integer", + "description": "The total number of resources to which the configuration operation applies." + }, + "configured": { + "type": "integer", + "description": "The number of resources that have a complete and valid configuration." + }, + "configuring": { + "type": "integer", + "description": "The number of resources that are in the process of being configured." + }, + "error": { + "type": "integer", + "description": "The number of resources that have encountered an error during the configuration process." + }, + "deleting": { + "type": "integer", + "description": "The number of resources that are in the process of being deleted." + } + } + }, + "ConfigCondition": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "The condition type." + }, + "message": { + "type": "string", + "description": "A human-readable message that provides additional information about the configuration operation." + } + } + }, + "ConfigState": { + "type": "object", + "description": "A representation of the resource's current configuration state \nthat comprises the status of the resource itself (`selfConfigState`) and any child \nresources (`childrenConfigState`).\n\nThe conditions array provides additional information during configuration changes.\n", + "properties": { + "selfConfigState": { + "$ref": "#/components/schemas/ConfigStateTally" + }, + "childrenConfigState": { + "$ref": "#/components/schemas/ConfigStateTally" + }, + "conditions": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ConfigCondition" + } + } + } + }, + "RoleListResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Role" + }, + "description": "Contains list of Role objects.\n" + } + } + }, + "Role": { + "type": "object", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + 
"desiredState": { + "$ref": "#/components/schemas/RoleDef" + }, + "currentStatus": { + "$ref": "#/components/schemas/RoleDef" + } + } + }, + "RoleDef": { + "type": "object", + "required": [ + "permissions" + ], + "description": "A Role is a collection of permissions and child Roles. All Roles are globally unique. You can assign Roles to users or to other Roles.\n", + "properties": { + "permissions": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/components/schemas/Permission" + } + } + } + }, + "Permission": { + "type": "object", + "required": [ + "access", + "path" + ], + "description": "A Permission is a pair consisting of a path or object and the desired level of access. Permissions govern Users ability to access specific paths or objects within an Environment.\n", + "properties": { + "path": { + "type": "string", + "example": "/services/environments/dev/", + "pattern": "^(\\/[^A-Z\\s\\x00-\\x1f\\x60\\x7f\\;\\\"\\[\\]\\{\\}\\\\\\/]*)+", + "description": "The path represents an area of the platform to which the Role grants access. For example, to allow the Role to access an Environment named \"dev\", you would define the path \"/services/environments/dev\".\n" + }, + "access": { + "type": "string", + "enum": [ + "NONE", + "READ", + "WRITE", + "FULL" + ], + "description": "Access determines the Role's ability to access a path or object. 
The options are:\n* NONE: Does not have any access to the path or object\n* READ: Has read only access (HTTP GET requests)\n* WRITE: Has read and write access (POST, PUT, PATCH requests) but cannot delete\n* FULL: Has read, write and delete access\n" + } + } + }, + "ListAuthProviders": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AuthProvider" + } + } + } + }, + "AuthProvider": { + "type": "object", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "currentStatus": { + "$ref": "#/components/schemas/AuthProviderDef" + }, + "desiredState": { + "$ref": "#/components/schemas/AuthProviderDef" + } + } + }, + "AuthProviderDef": { + "type": "object", + "properties": { + "provider": { + "oneOf": [ + { + "$ref": "#/components/schemas/ActiveDirectory" + }, + { + "$ref": "#/components/schemas/AzureActiveDirectory" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "ACTIVE_DIRECTORY": "#/components/schemas/ActiveDirectory", + "AZURE_ACTIVE_DIRECTORY": "#/components/schemas/AzureActiveDirectory" + } + }, + "example": { + "provider": { + "type": "ACTIVE_DIRECTORY", + "status": "CONNECTED_BOUND", + "domain": "DC=mydomain,DC=example,DC=com", + "defaultLoginDomain": "mydomain", + "connection": [ + { + "uri": "ldap://dc1.mydomain.com", + "sslMode": "REQUIRE", + "rawCa": "-----BEGIN CERTIFICATE-----\n MIIDMTCCasdfAwIBAgIUWw25xyNvQPxtjGUZopz0KeoaaQkwDQYJKoZIhvcNAQEL\n [SNIP]\n 2CPqdhpmQhAbzx9ElT8KyC7/08IdV0JK/kSWjfE4jOPOJyI2q0DWExKLuYe+rO+Q\n rg7hesA=\n -----END CERTIFICATE-----" + } + ], + "groupSearchFilter": "(objectClass=group)", + "groupMemberAttribute": "memberof", + "userFormat": "UPN", + "bindUser": { + "type": "PASSWORD", + "username": "user@mydomain", + "password": "********" + }, + "pollIntervalSec": 3600, + "groupCacheTimeSec": 7200, + "honorStaleGroups": false, + "filteredGroups": [ + 
"CN=Office_Admins_SF,OU=San Francisco,OU=North America,OU=Acme Financial", + "CN=RM_SF,OU=San Francisco,OU=North America,OU=Acme Financial", + "CN=Sales_Warsaw,OU=Warsaw,OU=Europe,OU=Acme Financial", + "CN=ACC_EU,OU=Europe,OU=Acme Financial" + ], + "groupMappings": [ + { + "external": "Engineering-US-West", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-west", + "links": { + "rel": "/api/v1/platform/auth/groups/eng-west", + "name": "eng-west", + "displayName": "West engineering" + } + } + }, + { + "external": "Engineering-US-East", + "caseSensitive": true, + "internal": { + "ref": "/platform/auth/groups/eng-east", + "links": { + "rel": "/api/v1/platform/auth/groups/eng-east", + "name": "eng-east", + "displayName": "East engineering" + } + } + } + ] + } + } + } + } + }, + "ActiveDirectory": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ACTIVE_DIRECTORY" + ] + }, + "status": { + "type": "string", + "description": "This field is read only and will only be reflected in currentStatus and ignored in desiredState.", + "enum": [ + "CONNECTED_BOUND", + "CONNECTED_BIND_FAILED", + "SSL_CA_FAILURE", + "CONNECTION_FAILURE", + "PENDING" + ] + }, + "domain": { + "type": "string", + "description": "The LDAP domain to authenticate against. The domain is unique per each configured Active Directory authentication provider. This field cannot be updated.\n", + "pattern": "^((DC|OU)=[^,=<>]+,)*(DC=[^,,=<>]+)$", + "example": "DC=mydomain,DC=example,DC=com" + }, + "defaultLoginDomain": { + "type": "string", + "description": "The login domain to use when a user authenticates with only a username rather than specifying DOMAIN\\username or username@domain.fqdn. 
\n", + "example": "mydomain.mycompany.com" + }, + "groupSearchFilter": { + "type": "string", + "description": "The search filter to use when finding users within a root domain.\n", + "example": "(objectClass=group)" + }, + "groupMemberAttribute": { + "type": "string", + "description": "The LDAP attribute to use for specifying membership in an AD Group.\n", + "example": "memberof" + }, + "userFormat": { + "type": "string", + "enum": [ + "USER_DOMAIN", + "UPN" + ], + "description": "The username format. \n\n- UPN = username@domain\n- USER_DOMAIN = domain/user\n", + "example": "UPN" + }, + "connection": { + "type": "array", + "minItems": 1, + "maxItems": 1, + "items": { + "$ref": "#/components/schemas/AuthConnection" + } + }, + "groupMappings": { + "type": "array", + "description": "Maps AD Group(s) to NGINX Controller Group(s).\n", + "items": { + "$ref": "#/components/schemas/GroupMapping" + } + }, + "bindUser": { + "$ref": "#/components/schemas/BindUser" + }, + "pollIntervalSec": { + "type": "integer", + "description": "Time, in seconds, between refresh of AD information, including the organization unit list.\n", + "minimum": 300, + "default": 3600 + }, + "groupCacheTimeSec": { + "type": "integer", + "description": "Time, in seconds, for which the list of organizational units is honored before being considered stale. \n> This value should be double the configured `pollIntervalSec`.\n", + "minimum": 600, + "default": 7200 + }, + "honorStaleGroups": { + "type": "boolean", + "default": false, + "description": "Controls whether a stale AD Group list should be honored when authorizing and authenticating users.\n" + }, + "filteredGroups": { + "type": "array", + "description": "List of groups in the AD that satifies the `groupSearchFilter` field. This field is read only. 
\n", + "items": { + "type": "string" + } + } + }, + "required": [ + "type" + ] + }, + "AzureActiveDirectory": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "AZURE_ACTIVE_DIRECTORY" + ] + }, + "status": { + "type": "string", + "description": "This field is read-only, it is reflected in currentStatus and ignored in desiredState.", + "enum": [ + "CONNECTED_BOUND", + "CONNECTION_FAILURE", + "PENDING" + ] + }, + "groupFilter": { + "type": "string", + "description": "The filter attribute is used to filter groups in an Azure AD tenant. See [here](https://docs.microsoft.com/en-us/graph/query-parameters#filter-parameter) for more details.\n", + "example": "groupTypes/any(c:c+eq+'Unified')" + }, + "oidcConfig": { + "$ref": "#/components/schemas/OIDCConfig" + }, + "groupMappings": { + "type": "array", + "description": "Maps Azure AD Group(s) to NGINX Controller Group(s).\n", + "items": { + "$ref": "#/components/schemas/GroupMapping" + } + }, + "pollIntervalSec": { + "type": "integer", + "description": "Time (sec) interval for refreshing Azure AD information, including the groups and group membership information.\n", + "minimum": 300, + "default": 3600 + }, + "groupCacheTimeSec": { + "type": "integer", + "description": "Time (sec) interval for which the group and group membership information are honored before being considered stale.\n> This value should be double the configured `pollIntervalSec`.\n", + "minimum": 600, + "default": 7200 + }, + "honorStaleGroups": { + "type": "boolean", + "default": false, + "description": "Controls whether a stale Azure AD group membership information is honored or not when authorizing and authenticating users.\n" + }, + "filteredGroups": { + "type": "array", + "description": "List of groups in the Azure AD which satisfy the `groupFilter` field. 
This field is read-only.\n", + "items": { + "type": "string" + } + } + }, + "required": [ + "type" + ] + }, + "AuthConnection": { + "type": "object", + "description": "Connection parameters for an external authentication provider.\n", + "properties": { + "uri": { + "type": "string", + "description": "Connection URI. \n", + "format": "uri", + "pattern": "^ldaps?:\\/\\/([\\w\\-]+\\.)+([\\w\\-]*)(:[0-9]+)?$", + "example": "ldap://dc1.example.com" + }, + "sslMode": { + "type": "string", + "description": "SSL connection parameters.\n- `PLAIN_TEXT`: (Insecure) Unencrypted connection. Does not require SSL certificates.\n- `REQUIRE`: (Default) Require an SSL connection. Unencrypted connections will fail. The server identity is not verified.\n- `VERIFY_CA`: (Most secure) Verify the certificate authority (CA) of the Active Directory connection. The server is verified by checking the certificate chain up to the root certificate stored on the client.\n\n> **Note**: For Production environments, we strongly advise using `VERIFY_CA` for the SSL mode in order to prevent server spoofing.\n", + "enum": [ + "PLAIN_TEXT", + "REQUIRE", + "VERIFY_CA" + ] + }, + "rawCa": { + "type": "string", + "description": "Raw Certificate", + "example": "-----BEGIN CERTIFICATE-----\n MIIDMTCCasdfAwIBAgIUWw25xyNvQPxtjGUZopz0KeoaaQkwDQYJKoZIhvcNAQEL\n [SNIP]\n 2CPqdhpmQhAbzx9ElT8KyC7/08IdV0JK/kSWjfE4jOPOJyI2q0DWExKLuYe+rO+Q\n rg7hesA=\n -----END CERTIFICATE-----" + } + }, + "required": [ + "uri", + "sslMode" + ] + }, + "GroupMapping": { + "type": "object", + "description": "Authentication provider group to NGINX Controller group mapping.\n", + "properties": { + "caseSensitive": { + "type": "boolean", + "description": "Controls whether or not you want to match the external name exactly or if the match should be case-insensitive.", + "default": false + }, + "external": { + "type": "string", + "description": "The name of the external group.", + "example": "Engineering-US-West" + }, + "internal": { + 
"$ref": "#/components/schemas/ResourceRef" + } + }, + "required": [ + "external", + "internal" + ] + }, + "BindUser": { + "oneOf": [ + { + "$ref": "#/components/schemas/BindUserPassword" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "PASSWORD": "#/components/schemas/BindUserPassword" + } + } + }, + "BindUserPassword": { + "type": "object", + "description": "Credentials used to access data within the AD.\n", + "properties": { + "type": { + "type": "string", + "description": "authentication type.", + "enum": [ + "PASSWORD" + ] + }, + "username": { + "type": "string", + "minLength": 1, + "description": "AD bind username." + }, + "password": { + "type": "string", + "format": "password", + "minLength": 1, + "description": "AD bind user password." + } + }, + "required": [ + "type", + "username", + "password" + ] + }, + "OIDCConfig": { + "type": "object", + "description": "OpenID Connect config for an external identity provider.\n", + "properties": { + "providerURI": { + "type": "string", + "description": "Identity provider URL. It contains the tenant ID for Azure Identity provider. \n", + "format": "uri", + "example": "https://login.microsoftonline.com/d45dfd66-6a3b-40d1-9be0-bf8327d81c56/v2.0" + }, + "clientID": { + "type": "string", + "description": "The Application (client) ID that the identity provider assigns to the NGINX Controller app. For Azure identity providers, you can find it in the portal App registrations.\n", + "example": "781729sd-87b2-4333-9730-338d3a87340b" + }, + "clientSecret": { + "type": "string", + "description": "A secret string that the application uses to prove its identity when requesting a token.\n", + "example": "Ic767hk_8anvoip76v" + }, + "scopes": { + "type": "array", + "description": "OpenID Connect Clients use scope values to specify what access privileges are being requested. 
See [here](https://openid.net/specs/openid-connect-basic-1_0.html#Scopes) for more information.", + "items": { + "type": "string" + }, + "default": [ + "openid", + "email", + "profile" + ] + }, + "redirectURI": { + "type": "string", + "description": "The redirect URI of NGINX controller, where the identity provider sends authentication responses to. This field is read-only. It is reflected in currentStatus and ignored in desiredState. Set this to be one of the redirect URIs in app registration for NGINX Controller in the identity provider's portal.", + "format": "uri", + "example": "https://54.212.107.157/api/v1/platform/oidc-handler" + } + }, + "required": [ + "providerURI", + "clientID", + "clientSecret" + ] + }, + "NamedLinks": { + "allOf": [ + { + "$ref": "#/components/schemas/SelfLinks" + }, + { + "type": "object", + "description": "Contains information about the object being referred to.\n\nThese are generally details -- like the object name and display name --\nthat are useful to a consumer of the API that performs further\nprocessing. \n\nThis object is only present in responses.\n \n", + "properties": { + "name": { + "type": "string", + "example": "production", + "description": "The name of the linked resource.\n" + }, + "displayName": { + "type": "string", + "example": "Production Environment", + "description": "A user friendly resource name." 
+ } + } + } + ] + }, + "ResourceRef": { + "type": "object", + "required": [ + "ref" + ], + "properties": { + "ref": { + "type": "string", + "example": "/services/environments/prod", + "description": "A reference to another NGINX Controller resource.\n" + }, + "links": { + "$ref": "#/components/schemas/NamedLinks" + } + } + }, + "GlobalSettings": { + "type": "object", + "required": [ + "metadata" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "type": "object", + "properties": { + "agentSettings": { + "$ref": "#/components/schemas/AgentSettingsData" + }, + "platformSettings": { + "$ref": "#/components/schemas/PlatformSettingsData" + } + } + }, + "currentStatus": { + "type": "object", + "properties": { + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "agentSettings": { + "$ref": "#/components/schemas/AgentSettingsData" + }, + "platformVersion": { + "$ref": "#/components/schemas/PlatformVersionData" + }, + "platformSettings": { + "$ref": "#/components/schemas/PlatformSettingsData" + } + } + } + } + }, + "AgentSettingsData": { + "type": "object", + "description": "Sets the global settings for NGINX Controller Agent.", + "properties": { + "enableNGINXConfigAnalyzer": { + "type": "boolean", + "description": "Disable or enable NGINX configuration file analysis.", + "default": true + }, + "enablePeriodicNGINX-T": { + "type": "boolean", + "description": "Disable or enable periodic NGINX configuration syntax checking with \\\"nginx -t\\\".", + "default": false + }, + "enableAnalyzeSSLCertificates": { + "type": "boolean", + "description": "Disable or enable analyzing SSL certs.", + "default": true + }, + "apiKey": { + "type": "string", + "description": "Access token to retrieve Agent installer." 
+ } + } + }, + "PlatformVersionData": { + "type": "object", + "description": "Returns the version information for NGINX Controller and its sub-components.", + "required": [ + "version", + "buildNumber", + "components" + ], + "properties": { + "version": { + "type": "string", + "description": "The version number." + }, + "buildNumber": { + "type": "string", + "description": "Build number of the artifact used for installation." + }, + "components": { + "type": "array", + "description": "The NGINX Controller sub-components.", + "items": { + "$ref": "#/components/schemas/componentDef" + } + } + }, + "example": { + "version": "3.0.0", + "buildNumber": "2313201", + "components": [ + { + "application": "secrets", + "version": "0.9.1" + }, + { + "application": "platform-mgr", + "version": "0.2.2" + } + ] + } + }, + "componentDef": { + "type": "object", + "required": [ + "application", + "version" + ], + "properties": { + "application": { + "type": "string" + }, + "version": { + "type": "string" + } + }, + "example": { + "application": "secrets", + "version": "0.9.1" + } + }, + "PlatformSettingsData": { + "type": "object", + "description": "Sets the platform settings for NGINX Controller.", + "properties": { + "apigwCert": { + "type": "string", + "description": "SSL/TLS certificate (PEM format) for API Gateway.\nBecause JSON does not support multi-line strings, you must replace binary line feeds with ASCII line feeds (`\\n`).\n", + "example": "-----BEGIN CERTIFICATE-----\\n MIICpzCCAhACCQDkjx7mP9cuRjANBgkqhkiG9w0BAQsFADCBlzELMAkGA1UEBhMC\\n MiJVGawyxDzBm2UhzNOE0ABHfjAgM6PAYmtMhhQawk6bmttXYhJeqhLSji4LEj5d\\n Z4FmXQ5rWM0RWBs=\\n -----END CERTIFICATE-----" + }, + "apigwKey": { + "type": "string", + "writeOnly": true, + "description": "Private key (PEM format) for API Gateway. 
Key must match certificate.\nBecause JSON does not support multi-line strings, you must replace binary line feeds with ASCII line feeds (`\\n`).\n", + "example": "-----BEGIN PRIVATE KEY-----\\n MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBALSQBtRafNJtTqN0\\n nYiZq6TZUsHjfG2R9PlK6jsvno9O6amN96Al6ZSTTDjhr4VU7/RJ0p/cisiCboCX\\n 4cCq6lFKpIpeZJI=\\n -----END PRIVATE KEY-----" + }, + "FQDN": { + "type": "string", + "description": "Fully qualified domain name for the NGINX Controller server, which users and Controller Agents will use when connecting to NGINX Controller." + }, + "smtpHost": { + "type": "string", + "description": "SMTP host" + }, + "smtpPort": { + "type": "integer", + "description": "SMTP port", + "minimum": 1, + "maximum": 65535 + }, + "smtpAuthentication": { + "type": "boolean", + "description": "Specify if SMTP server requires username and password." + }, + "smtpUseTLS": { + "type": "boolean", + "description": "Specify if SMTP should use TLS." + }, + "smtpFrom": { + "type": "string", + "description": "Specify the email to show in the 'FROM' field." + }, + "smtpUser": { + "type": "string", + "description": "SMTP user (required when smtpAuthentication is enabled)." + }, + "smtpPassword": { + "type": "string", + "description": "SMTP password (required when smtpAuthentication is enabled)." + }, + "dbType": { + "type": "string", + "enum": [ + "external", + "embedded" + ], + "description": "Config database type - embedded or external. Read-only.", + "readOnly": true, + "example": "embedded" + }, + "dbHost": { + "type": "string", + "description": "Config database host (read-only if using an embedded Config database)." + }, + "dbPort": { + "type": "integer", + "description": "Config database port (read-only if using internal Config database).", + "minimum": 1, + "maximum": 65535 + }, + "dbUser": { + "type": "string", + "description": "Config database username (read-only if using internal Config database)." 
+ }, + "dbPassword": { + "type": "string", + "description": "Config database password (read-only if using internal Config database)." + }, + "dbUseTLS": { + "type": "boolean", + "description": "Specify wether the Config database connection uses TLS or not (read-only if using internal Config database)." + }, + "dbCA": { + "type": "string", + "description": "SSL/TLS Certificate Authority certificate (PEM format) for verifying Config database server certificate. Only required if server certificate is signed by a private CA.\nBecause JSON does not support multi-line strings, you must replace line feed and/or carriage return characters with their literal equivalents as two characters - `\\n`, a backslash and letter `n`.\nRead-only if using internal Config database.\n", + "example": "-----BEGIN CERTIFICATE-----\\nMIICpzCCAhACCQDkjx7mP9cuRjANBgkqhkiG9w0BAQsFADCBlzELMAkGA1UEBhMC\\nZ4FmXQ5rWM0RWBs=\\n-----END CERTIFICATE-----" + }, + "dbClientCert": { + "type": "string", + "description": "SSL/TLS client certificate (PEM format) for authenticating when connecting to the Config database.\nBecause JSON does not support multi-line strings, you must replace binary line feeds with ASCII line feeds (`\\n`).\nRead-only if using internal Config database.\n", + "example": "-----BEGIN CERTIFICATE-----\\nMIICpzCCAhACCQDkjx7mP9cuRjANBgkqhkiG9w0BAQsFADCBlzELMAkGA1UEBhMC\\nZ4FmXQ5rWM0RWBs=\\n-----END CERTIFICATE-----" + }, + "dbClientKey": { + "type": "string", + "description": "Private key (PEM format) for Config database connection. 
Key must match certificate.\nBecause JSON does not support multi-line strings, you must replace binary line feeds with ASCII line feeds (`\\n`).\nRead-only if using internal Config database.\n", + "example": "-----BEGIN PRIVATE KEY-----\\nMIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBALSQBtRafNJtTqN0\\nnYiZq6TZUsHjfG2R9PlK6jsvno9O6amN96Al6ZSTTDjhr4VU7/RJ0p/cisiCboCX\\n4cCq6lFKpIpeZJI=\\n-----END PRIVATE KEY-----" + }, + "tsdbVolumeType": { + "type": "string", + "description": "Time Series database volume type.", + "enum": [ + "local", + "nfs", + "aws" + ], + "readOnly": true + }, + "tsdbNFSHost": { + "type": "string", + "description": "Time Series database NFS host.", + "readOnly": true + }, + "tsdbNFSPath": { + "type": "string", + "description": "Time Series database NFS path.", + "readOnly": true + }, + "tsdbAWSVolumeID": { + "type": "string", + "description": "Time Series database AWS Volume ID.", + "readOnly": true + }, + "clusterFloatingIP": { + "type": "string", + "pattern": "^(?:(?:25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\.){3}(?:25[0-5]|2[0-4]\\d|[01]?\\d\\d?)$|^$", + "description": "Floating IPv4 address used to communicate with Controller in multinode/cluster mode.\nThis setting is only valid for local (non-cloud) installations. Set empty value to disable floating IP.\n" + }, + "clusterLoadBalancer": { + "type": "string", + "description": "Hostname or IP of the external cluster load balancer. Read-only.", + "readOnly": true + } + } + }, + "UserDef": { + "type": "object", + "required": [ + "firstName", + "lastName", + "email", + "password" + ], + "properties": { + "account": { + "type": "string", + "minLength": 1, + "maxLength": 64, + "example": 1, + "deprecated": true, + "description": "The account number of the tenant." + }, + "id": { + "type": "integer", + "example": 123, + "description": "Account ID" + }, + "firstName": { + "type": "string", + "minLength": 1, + "maxLength": 64, + "example": "John", + "description": "Given name." 
+ }, + "lastName": { + "type": "string", + "minLength": 1, + "maxLength": 64, + "example": "Doe", + "description": "Surname." + }, + "lastLogin": { + "type": "integer", + "example": 1570132969, + "description": "Unix time (seconds since Epoch) of last login." + }, + "email": { + "type": "string", + "example": "john.doe@example.com", + "description": "An email address that serves as the account's username. This must match the resource's metadata.name property.", + "format": "email" + }, + "authn": { + "type": "string", + "enum": [ + "amplify" + ] + }, + "password": { + "type": "string", + "format": "password", + "minLength": 8, + "maxLength": 64, + "description": "Passwords must meet the following requirements:\n\n - length must be between 8 and 64 characters\n - must contain at least 1 letter\n - must contain at least 1 number\n - must be different from the old password\n\n Dictionary words, mangled dictionary words, or systematic passwords like '1234567a' are not allowed.\n" + }, + "apiKey": { + "type": "string", + "example": "761ab961448865d86ef71c67fd74432b", + "deprecated": true, + "description": "User api key." 
+ }, + "isEnabled": { + "type": "boolean", + "description": "Indicates if the account is enabled.", + "default": false + }, + "roles": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + }, + "groups": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + } + }, + "User": { + "type": "object", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/UserDef" + }, + "currentStatus": { + "$ref": "#/components/schemas/UserDef" + } + } + }, + "UpdateUser": { + "type": "object", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/UpdateUserDef" + }, + "currentStatus": { + "$ref": "#/components/schemas/UserDef" + } + }, + "example": { + "metadata": { + "name": "john.doe@example.com", + "kind": "user", + "createTime": "2019-07-29T09:12:33.001Z", + "displayName": "Jane Doe", + "description": "NGINX Controller user with a production environment role." + }, + "desiredState": { + "firstName": "Jane", + "lastName": "Doe", + "password": "********", + "verifyPassword": "********", + "roles": [ + { + "ref": "/platform/roles/new" + } + ] + }, + "currentStatus": { + "account": "1", + "id": 123, + "firstName": "Jane", + "lastName": "Doe", + "lastLogin": 1570132969, + "email": "john.doe@example.com", + "authn": "amplify", + "password": "********", + "roles": [ + { + "ref": "/platform/roles/prod", + "links": { + "rel": "/api/v1/platform/roles/prod", + "name": "prod" + } + } + ] + } + } + }, + "UpdateUserDef": { + "type": "object", + "properties": { + "firstName": { + "type": "string", + "minLength": 1, + "maxLength": 64, + "example": "Jane", + "description": "Given name." 
+ }, + "lastName": { + "type": "string", + "minLength": 1, + "maxLength": 64, + "example": "Doe", + "description": "Surname." + }, + "password": { + "type": "string", + "format": "password", + "minLength": 8, + "maxLength": 64, + "description": "The user's password must meet the following requirements:\n\n - Length must be between 8 and 64 characters\n - Must contain at least 1 letter\n - Must contain at least 1 number\n - Must be different from the old password\n - Dictionary words, mangled dictionary words, or systematic passwords like '1234567a' are not allowed\n \n" + }, + "verifyPassword": { + "type": "string", + "format": "password", + "description": "The user's current password. Users must verify their current password to make updates to their user profiles. The current password does not need to be verified when an admin user updates other users' profiles." + }, + "roles": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + }, + "example": { + "firstName": "Jane", + "lastName": "Doe", + "password": "********", + "verifyPassword": "********", + "roles": [ + { + "ref": "/platform/roles/new" + } + ] + } + }, + "UserListResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/User" + }, + "description": "List of User resources." 
+ } + }, + "example": { + "items": [ + { + "metadata": { + "name": "john.doe@example.com", + "kind": "user", + "createTime": "2019-07-29T09:12:33.001Z", + "displayName": "John Doe", + "description": "NGINX Controller user with production environment role" + }, + "desiredState": { + "firstName": "John", + "lastName": "Doe", + "email": "john.doe@example.com", + "password": "********", + "roles": [ + { + "ref": "/platform/roles/prod" + } + ] + }, + "currentStatus": { + "account": "1", + "id": 123, + "firstName": "John", + "lastName": "Doe", + "lastLogin": 1570132969, + "email": "john.doe@example.com", + "authn": "amplify", + "password": "********", + "roles": [ + { + "ref": "/platform/roles/prod", + "links": { + "rel": "/api/v1/platform/roles/prod", + "name": "prod" + } + } + ] + } + } + ] + } + }, + "GlobalFeatures": { + "type": "object", + "required": [ + "metadata" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "type": "object", + "properties": { + "featureFlags": { + "type": "array", + "description": "Flags to describe or extend NGINX Controller behavior (optional).", + "items": { + "$ref": "#/components/schemas/FeatureFlagsDef" + } + } + } + }, + "currentStatus": { + "type": "object", + "properties": { + "state": { + "$ref": "#/components/schemas/ConfigState" + }, + "featureFlags": { + "type": "array", + "description": "Flags to describe or extend NGINX Controller behavior (optional).", + "items": { + "$ref": "#/components/schemas/FeatureFlagsDef" + } + } + } + } + } + }, + "FeatureFlagsDef": { + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "example": { + "name": "reserved", + "value": "true" + } + }, + "ResetPasswordRequest": { + "type": "object", + "required": [ + "metadata" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + } + } + }, + 
"ResetPassword": { + "type": "object", + "required": [ + "desiredState", + "metadata" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "type": "object", + "required": [ + "password" + ], + "properties": { + "password": { + "type": "string", + "format": "password", + "example": "TestImpl45!", + "minLength": 8, + "maxLength": 64, + "description": "The user's password must meet the following requirements:\n - Length must be between 8 and 64 characters\n - Must contain at least 1 letter\n - Must contain at least 1 number\n - Must be different from the old password\n - Dictionary words, mangled dictionary words, or systematic passwords like '1234567a' are not allowed.\n" + } + } + } + } + }, + "LicenseRequest": { + "type": "object", + "required": [ + "content" + ], + "properties": { + "content": { + "type": "string", + "format": "password", + "description": "License file contents, encoded as Base64" + } + } + }, + "PutLicenseRequest": { + "type": "object", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "type": "object", + "required": [ + "content" + ], + "properties": { + "content": { + "type": "string", + "format": "password", + "description": "The customer association token or NGINX Controller license, which can be downloaded from your [MyF5](account.f5.com/myf5) account. The license must be formatted as a base64-encoded string, while the association token is unencoded." 
+ } + } + } + } + }, + "License": { + "type": "object", + "required": [ + "metadata", + "desiredState", + "currentStatus" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/LicenseData" + } + } + } + }, + "currentStatus": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/LicenseData" + } + } + } + } + } + }, + "LicenseResponse": { + "type": "object", + "required": [ + "metadata", + "desiredState", + "currentStatus" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "type": "object", + "properties": { + "content": { + "type": "string", + "format": "password", + "description": "Redacted license information." + }, + "items": { + "type": "array", + "deprecated": true, + "items": { + "$ref": "#/components/schemas/LicenseData" + } + } + } + }, + "currentStatus": { + "type": "object", + "properties": { + "subscription": { + "$ref": "#/components/schemas/Subscription" + }, + "entitlement": { + "$ref": "#/components/schemas/Entitlement" + }, + "state": { + "type": "object", + "required": [ + "currentInstance" + ], + "properties": { + "currentInstance": { + "$ref": "#/components/schemas/CurrentInstance" + } + } + }, + "items": { + "type": "array", + "deprecated": true, + "items": { + "$ref": "#/components/schemas/LicenseData" + } + } + } + } + } + }, + "LicenseData": { + "type": "object", + "required": [ + "product", + "expiry", + "instanceCount", + "serial", + "version", + "type", + "id" + ], + "description": "Defines the features of a given license", + "properties": { + "product": { + "type": "string", + "description": "The name of the product the license enables.", + "example": "NGINX Controller Monitoring" + }, + "expiry": { + "type": "string", + "format": "date-time", + 
"description": "The date on which the license expires. Represented in Coordinated Universal Time (UTC), specifically using the ISO 8601 standard.", + "example": "1996-02-26T00:00:00.000Z" + }, + "instanceCount": { + "type": "integer", + "description": "The number of instances that are allowed by the license.", + "example": 20 + }, + "serial": { + "type": "integer", + "description": "A unique identifier for the license.", + "example": 20145 + }, + "version": { + "type": "integer", + "description": "The product version number.", + "example": 1 + }, + "type": { + "type": "string", + "description": "The license type. Lowercase values are deprecated.", + "enum": [ + "production", + "beta", + "internal", + "partner", + "trial", + "PRODUCTION", + "BETA", + "INTERNAL", + "PARTNER", + "TRIAL", + "PAID", + "EVAL" + ] + }, + "id": { + "type": "string", + "description": "Unique identifier for a product.", + "enum": [ + "UNKNOWN", + "ADC", + "APIM", + "ANALYTICS" + ] + } + } + }, + "Subscription": { + "type": "object", + "required": [ + "id" + ], + "description": "Defines the features of a subscription.", + "properties": { + "id": { + "type": "string", + "format": "uuid", + "example": "b6d7c577-b708-44ad-839c-9743f85fcf7c", + "description": "Subscription ID." 
+ } + } + }, + "Entitlement": { + "type": "object", + "required": [ + "features" + ], + "description": "Defines the elements of an entitlement.", + "properties": { + "features": { + "type": "array", + "description": "Product features.", + "items": { + "$ref": "#/components/schemas/Feature" + } + } + } + }, + "Feature": { + "type": "object", + "required": [ + "name", + "unitOfMeasure", + "id", + "type" + ], + "description": "Defines a product feature, consumption metric, and metric usage limit.", + "properties": { + "name": { + "type": "string", + "description": "Name of the feature.", + "example": "NGINX Controller Load Balancing" + }, + "limit": { + "type": "integer", + "description": "Maximum limit for the consumption metric.", + "example": 1, + "minimum": 0 + }, + "unlimited": { + "type": "boolean", + "description": "Indicates whether there is a limit for the consumption metric or not.", + "example": true + }, + "unitOfMeasure": { + "$ref": "#/components/schemas/UnitOfMeasure" + }, + "type": { + "type": "string", + "description": "The license type.", + "example": "PAID", + "enum": [ + "PRODUCTION", + "BETA", + "INTERNAL", + "PARTNER", + "TRIAL", + "PAID", + "EVAL" + ] + }, + "id": { + "$ref": "#/components/schemas/FeatureID" + }, + "expiry": { + "type": "string", + "format": "date-time", + "description": "The date on which the license expires. Represented in Coordinated Universal Time (UTC), specifically using the ISO 8601 standard.", + "example": "1996-02-26T00:00:00.000Z" + }, + "gracePeriodDays": { + "type": "integer", + "description": "Number of grace period days after the license expires.", + "example": 30, + "minimum": 0 + } + } + }, + "CurrentInstance": { + "type": "object", + "required": [ + "type", + "version", + "status", + "features", + "configState" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid", + "example": "8ce9b80a-f7fc-48fd-ac28-8d5f3fe898d6", + "description": "Unique identifier for the current instance." 
+ }, + "telemetryLastReported": { + "type": "string", + "format": "date-time", + "description": "Date and time when telemetry data was last reported from the current instance. Represented in Coordinated Universal Time (UTC), specifically using the ISO 8601 standard.", + "example": "1996-02-26T00:00:00.000Z" + }, + "type": { + "type": "string", + "example": "NGINX Controller", + "description": "Product type." + }, + "status": { + "type": "string", + "description": "Status of the current instance with respect to the license.\n\n- `NONE`: The current instance is not yet licensed.\n- `INVALID`: The current instance has at least one entitlement in non-functional license status.\n- `ENFORCED`: The current instance has at least one entitlement in enforced license status and no entitlements in non-functional status.\n- `GRACE`: The current instance has at least one entitlement in grace period license status and no entitlements in enforced or non-functional status.\n- `VALID`: The current instance has valid entitlement(s) that have no adverse license statuses.\n- `CORRUPTED`: The license for the current instance is corrupted. Upload the license file again to rectify the status.\n", + "enum": [ + "NONE", + "INVALID", + "ENFORCED", + "GRACE", + "VALID", + "CORRUPTED" + ] + }, + "version": { + "type": "string", + "example": "3.3.0", + "description": "Product version." + }, + "configState": { + "$ref": "#/components/schemas/ConfigState" + }, + "features": { + "type": "array", + "items": { + "$ref": "#/components/schemas/FeatureStatus" + }, + "description": "List of the available features and their current usage." 
+ } + } + }, + "FeatureStatus": { + "type": "object", + "required": [ + "name", + "unitOfMeasure", + "id" + ], + "properties": { + "name": { + "type": "string", + "description": "Name of the product feature.", + "example": "NGINX Controller Load Balancing" + }, + "used": { + "type": "number", + "description": "Amount of the feature used by the current NGINX Controller instance.", + "example": 1, + "minimum": 0 + }, + "aggregateUsed": { + "type": "number", + "description": "Amount of the feature used by all NGINX Controller instances in the bucket.", + "example": 1, + "minimum": 0 + }, + "remaining": { + "type": "number", + "description": "Amount of the feature remaining.", + "example": 1, + "minimum": 0 + }, + "unitOfMeasure": { + "$ref": "#/components/schemas/UnitOfMeasure" + }, + "id": { + "$ref": "#/components/schemas/FeatureID" + }, + "daysUntilExpiry": { + "type": "integer", + "description": "Defines when the current entitlement expires.", + "example": 20, + "minimum": 0 + }, + "gracePeriodRemainingDays": { + "type": "integer", + "description": "Defines the remaining grace period days after the license expires.", + "minimum": 0 + } + } + }, + "UnitOfMeasure": { + "type": "string", + "enum": [ + "INSTANCES", + "WORKLOADS", + "DATA_PER_HOUR_IN_GB", + "SUCCESSFUL_API_CALLS_IN_MILLIONS", + "SUCCESSFUL_API_CALLS" + ], + "description": "Unit of measure used for computing consumption.", + "example": "WORKLOADS" + }, + "FeatureID": { + "type": "string", + "enum": [ + "UNKNOWN", + "ADC", + "APIM", + "ANALYTICS" + ], + "example": "ADC", + "description": "Unique identifier for a product feature." 
+ }, + "LicenseName": { + "type": "string", + "description": "Name of the License resource.", + "example": "controller-provided" + }, + "NginxPlusLicensesList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/NginxPlusLicenseResponse" + } + } + } + }, + "NginxPlusLicenseResponse": { + "type": "object", + "required": [ + "metadata", + "currentStatus", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "currentStatus": { + "$ref": "#/components/schemas/NginxPlusKeys" + }, + "desiredState": { + "$ref": "#/components/schemas/NginxPlusKeys" + } + }, + "example": { + "metadata": { + "name": "controller-provided", + "ref": "/platform/licenses/nginx-plus-licenses/controller-provided", + "kind": "license" + }, + "currenStatus": { + "certKey": "a valid certificate key for NGINX Plus", + "privateKey": "a valid private key for NGINX Plus" + }, + "desiredState": { + "certKey": "a valid certificate key for NGINX Plus", + "privateKey": "a valid private key for NGINX Plus" + } + } + }, + "NginxPlusKeys": { + "type": "object", + "required": [ + "certKey", + "privateKey" + ], + "properties": { + "certKey": { + "type": "string", + "description": "Contents of the certificate file that is required to install NGINX Plus.\n" + }, + "privateKey": { + "type": "string", + "description": "Contents of the key file that is required to install NGINX Plus.\n" + } + } + }, + "Login": { + "type": "object", + "properties": { + "credentials": { + "oneOf": [ + { + "$ref": "#/components/schemas/BasicAuth" + }, + { + "$ref": "#/components/schemas/ActiveDirectoryAuth" + }, + { + "$ref": "#/components/schemas/AzureActiveDirectoryAuth" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "BASIC": "#/components/schemas/BasicAuth", + "ACTIVE_DIRECTORY": "#/components/schemas/ActiveDirectoryAuth", + "AZURE_ACTIVE_DIRECTORY": 
"#/components/schemas/AzureActiveDirectoryAuth" + } + }, + "example": { + "type": "BASIC", + "username": "guest@example.com", + "password": "********" + } + } + } + }, + "AzureActiveDirectoryAuth": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "AZURE_ACTIVE_DIRECTORY" + ] + }, + "providerName": { + "type": "string", + "description": "The Azure active directory provider name configured within NGINX Controller.\n" + } + }, + "required": [ + "type", + "providerName" + ] + }, + "ActiveDirectoryAuth": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ACTIVE_DIRECTORY" + ] + }, + "username": { + "type": "string", + "description": "The username for the NGINX Controller User account.\n" + }, + "password": { + "type": "string", + "description": "The password for the NGINX Controller User account.\n" + }, + "providerName": { + "type": "string", + "description": "The active directory provider name configured within NGINX Controller.\n" + } + }, + "required": [ + "type", + "username", + "password", + "providerName" + ] + }, + "BasicAuth": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "BASIC" + ] + }, + "username": { + "type": "string", + "description": "The username for the NGINX Controller User account.\n" + }, + "password": { + "type": "string", + "description": "The password for the NGINX Controller User account.\n" + } + }, + "required": [ + "type", + "username", + "password" + ] + }, + "ListReadyProviders": { + "type": "object", + "required": [ + "items" + ], + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/SimpleAuthProvider" + } + } + } + }, + "SimpleAuthProvider": { + "type": "object", + "required": [ + "name", + "type" + ], + "properties": { + "name": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "BASIC", + "ACTIVE_DIRECTORY", + "AZURE_ACTIVE_DIRECTORY" + ] + } + } + }, + 
"Integration": { + "type": "object", + "description": "The Integration resource defines the account credentials and endpoint needed for NGINX Controller to connect to external services.\n", + "required": [ + "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/IntegrationState" + }, + "currentStatus": { + "$ref": "#/components/schemas/IntegrationState" + } + } + }, + "GetIntegrationResponse": { + "allOf": [ + { + "$ref": "#/components/schemas/Integration" + } + ] + }, + "ListIntegrationResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Integration" + } + } + } + }, + "UpdateIntegration": { + "type": "object", + "required": [ + "metadata" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/UpdateIntegrationState" + }, + "currentStatus": { + "$ref": "#/components/schemas/UpdateIntegrationState" + } + } + }, + "UpdateIntegrationState": { + "oneOf": [ + { + "$ref": "#/components/schemas/AWSIntegrationForUpdate" + }, + { + "$ref": "#/components/schemas/AzureIntegrationForUpdate" + }, + { + "$ref": "#/components/schemas/GenericIntegrationForUpdate" + }, + { + "$ref": "#/components/schemas/VSphereIntegrationForUpdate" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "AWS_INTEGRATION": "#/components/schemas/AWSIntegrationForUpdate", + "AZURE_INTEGRATION": "#/components/schemas/AzureIntegrationForUpdate", + "GENERIC_INTEGRATION": "#/components/schemas/GenericIntegrationForUpdate", + "VSPHERE_INTEGRATION": "#/components/schemas/VSphereIntegrationForUpdate" + } + } + }, + "AWSIntegrationForUpdate": { + "description": "The AWS integration stores the AWS access key for programmatic access to the AWS cloud API.\n", + "required": [ + "type" + ], + "type": "object", + 
"properties": { + "type": { + "type": "string", + "description": "The AWS_INTEGRATION is an integration for connecting to Amazon Web Services (AWS).", + "enum": [ + "AWS_INTEGRATION" + ] + }, + "endpointUri": { + "type": "string", + "description": "The URI of the AWS cloud service endpoint." + }, + "credential": { + "description": "Account credentials for AWS cloud API.\n", + "oneOf": [ + { + "$ref": "#/components/schemas/AWSAccessKeyCredential" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "AWS_ACCESS_KEY": "#/components/schemas/AWSAccessKeyCredential" + } + } + } + } + }, + "AzureIntegrationForUpdate": { + "description": "The Azure integration stores the Azure credentials for programmatic access to the Azure cloud API.\n", + "required": [ + "type" + ], + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "The AZURE_INTEGRATION is an integration for connecting to Microsoft Azure.", + "enum": [ + "AZURE_INTEGRATION" + ] + }, + "credential": { + "description": "Account credentials for Azure cloud API.\n", + "oneOf": [ + { + "$ref": "#/components/schemas/AzureServicePrincipal" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "AZURE_SERVICE_PRINCIPAL": "#/components/schemas/AzureServicePrincipal" + } + } + } + } + }, + "VSphereIntegrationForUpdate": { + "type": "object", + "description": "The VSphere integration stores the credentials for programmatic access to the VSphere cloud API.\n", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "description": "The VSPHERE_INTEGRATION is an Integration for connecting to the VSphere datacenter.\n", + "enum": [ + "VSPHERE_INTEGRATION" + ] + }, + "hostname": { + "type": "string", + "description": "VSphere instance hostname ( FQDN or IP ), Connection to it from the controller is through https, the VSphere server certificate is not verified." 
+ }, + "rawCa": { + "type": "string", + "description": "Raw CA Certificate used to verify the vSphere server certificate", + "example": "-----BEGIN CERTIFICATE-----\n MIIDMTCCasdfAwIBAgIUWw25xyNvQPxtjGUZopz0KeoaaQkwDQYJKoZIhvcNAQEL\n [SNIP]\n 2CPqdhpmQhAbzx9ElT8KyC7/08IdV0JK/kSWjfE4jOPOJyI2q0DWExKLuYe+rO+Q\n rg7hesA=\n -----END CERTIFICATE-----" + }, + "credential": { + "oneOf": [ + { + "$ref": "#/components/schemas/UserPassCredential" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "USER_PASS": "#/components/schemas/UserPassCredential" + } + } + } + } + }, + "GenericIntegrationForUpdate": { + "required": [ + "type" + ], + "type": "object", + "description": "A Generic integration stores the credentials and URI endpoint to connect to external services\nthat require a username and password or an API token for authentication, or accept an unauthenticated connection.\n", + "properties": { + "type": { + "type": "string", + "description": "The GENERIC_INTEGRATION is an integration for connecting to external services that require a\nusername and password or an API key, or accept an unauthenticated connection.\n", + "enum": [ + "GENERIC_INTEGRATION" + ] + }, + "endpointUri": { + "type": "string", + "description": "The URI of the service endpoint." 
+ }, + "credential": { + "oneOf": [ + { + "$ref": "#/components/schemas/UserPassCredential" + }, + { + "$ref": "#/components/schemas/ApiKeyCredential" + }, + { + "$ref": "#/components/schemas/Unauthenticated" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "USER_PASS": "#/components/schemas/UserPassCredential", + "API_KEY": "#/components/schemas/ApiKeyCredential", + "UNAUTHENTICATED": "#/components/schemas/Unauthenticated" + } + } + } + } + }, + "IntegrationState": { + "oneOf": [ + { + "$ref": "#/components/schemas/AWSIntegration" + }, + { + "$ref": "#/components/schemas/AzureIntegration" + }, + { + "$ref": "#/components/schemas/GenericIntegration" + }, + { + "$ref": "#/components/schemas/VSphereIntegration" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "AWS_INTEGRATION": "#/components/schemas/AWSIntegration", + "AZURE_INTEGRATION": "#/components/schemas/AzureIntegration", + "GENERIC_INTEGRATION": "#/components/schemas/GenericIntegration", + "VSPHERE_INTEGRATION": "#/components/schemas/VSphereIntegration" + } + } + }, + "AzureIntegration": { + "description": "The Azure integration stores the Azure credentials for programmatic access to the Azure cloud API.\n", + "required": [ + "type", + "credential" + ], + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "The AZURE_INTEGRATION is an integration for connecting to Microsoft Azure.", + "enum": [ + "AZURE_INTEGRATION" + ] + }, + "credential": { + "description": "Account credentials for Azure cloud API.\n", + "oneOf": [ + { + "$ref": "#/components/schemas/AzureServicePrincipal" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "AZURE_SERVICE_PRINCIPAL": "#/components/schemas/AzureServicePrincipal" + } + } + } + } + }, + "AWSIntegration": { + "description": "The AWS integration stores the AWS access key for programmatic access to the AWS cloud API.\n", + "required": [ + "type", + "credential" + ], + "type": 
"object", + "properties": { + "type": { + "type": "string", + "description": "The AWS_INTEGRATION is an integration for connecting to Amazon Web Services (AWS).", + "enum": [ + "AWS_INTEGRATION" + ] + }, + "endpointUri": { + "type": "string", + "description": "The URI of the AWS cloud service endpoint." + }, + "credential": { + "description": "Account credentials for AWS cloud API.\n", + "oneOf": [ + { + "$ref": "#/components/schemas/AWSAccessKeyCredential" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "AWS_ACCESS_KEY": "#/components/schemas/AWSAccessKeyCredential" + } + } + } + } + }, + "AWSAccessKeyCredential": { + "required": [ + "type", + "accessKeyID", + "secretAccessKey" + ], + "type": "object", + "description": "AWS access key credentials: access key ID and secret access key.\n", + "properties": { + "type": { + "type": "string", + "description": "AWS_ACCESS_KEY credentials are used to access the AWS API programmatically.\n", + "enum": [ + "AWS_ACCESS_KEY" + ] + }, + "accessKeyID": { + "type": "string", + "description": "The access key ID of the AWS access key credential." + }, + "secretAccessKey": { + "type": "string", + "description": "The secret access key of the AWS access key credential." + } + } + }, + "AzureServicePrincipal": { + "required": [ + "type", + "clientID", + "clientSecret", + "tenantID" + ], + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "", + "enum": [ + "AZURE_SERVICE_PRINCIPAL" + ] + }, + "clientID": { + "type": "string", + "description": "The Client ID of the service principal." + }, + "clientSecret": { + "type": "string", + "description": "The secret associated with the service principal." + }, + "tenantID": { + "type": "string", + "description": "The ID for the Active Directory tenant to which the service principal belongs." 
+ } + } + }, + "VSphereIntegration": { + "type": "object", + "description": "The VSphere integration stores the credentials for programmatic access to the VSphere cloud API.\n", + "required": [ + "type", + "hostname", + "credential" + ], + "properties": { + "type": { + "type": "string", + "description": "The VSPHERE_INTEGRATION is an Integration for connecting to the VSphere cloud.\n", + "enum": [ + "VSPHERE_INTEGRATION" + ] + }, + "hostname": { + "type": "string", + "description": "VSphere instance hostname ( FQDN or IP ), Connection to it from the controller is through https, the VSphere server certificate is not verified." + }, + "rawCa": { + "type": "string", + "description": "Raw CA Certificate used to verify the vSphere server certificate", + "example": "-----BEGIN CERTIFICATE-----\n MIIDMTCCasdfAwIBAgIUWw25xyNvQPxtjGUZopz0KeoaaQkwDQYJKoZIhvcNAQEL\n [SNIP]\n 2CPqdhpmQhAbzx9ElT8KyC7/08IdV0JK/kSWjfE4jOPOJyI2q0DWExKLuYe+rO+Q\n rg7hesA=\n -----END CERTIFICATE-----" + }, + "credential": { + "oneOf": [ + { + "$ref": "#/components/schemas/UserPassCredential" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "USER_PASS": "#/components/schemas/UserPassCredential" + } + } + } + } + }, + "GenericIntegration": { + "required": [ + "type", + "endpointUri", + "credential" + ], + "type": "object", + "description": "A Generic integration resource stores the credentials and URI endpoint to connect to external services\nthat require a username and password or API token for authentication, or accept unauthenticated connection.\n", + "properties": { + "type": { + "type": "string", + "description": "The GENERIC_INTEGRATION is an Integration for connecting to external services that require a\nusername and password or an API key, or accept an unauthenticated connection.\n", + "enum": [ + "GENERIC_INTEGRATION" + ] + }, + "endpointUri": { + "type": "string", + "description": "The URI of the service endpoint." 
+ }, + "credential": { + "oneOf": [ + { + "$ref": "#/components/schemas/UserPassCredential" + }, + { + "$ref": "#/components/schemas/ApiKeyCredential" + }, + { + "$ref": "#/components/schemas/Unauthenticated" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "USER_PASS": "#/components/schemas/UserPassCredential", + "API_KEY": "#/components/schemas/ApiKeyCredential", + "UNAUTHENTICATED": "#/components/schemas/Unauthenticated" + } + } + } + } + }, + "UserPassCredential": { + "description": "A credential consisting of a username and password. In the case of splunk HEC, username can be any string.", + "required": [ + "type", + "password", + "username" + ], + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "USER_PASS credentials are used to connect to web services that accept a username and password for authentication.\n", + "enum": [ + "USER_PASS" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + } + } + }, + "ApiKeyCredential": { + "description": "For API key based authentication.", + "required": [ + "type", + "apiKey" + ], + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "API_KEY credentials are used to connect to web services that accept an API key for authentication.\n", + "enum": [ + "API_KEY" + ] + }, + "apiKey": { + "type": "string" + } + } + }, + "Unauthenticated": { + "description": "For unauthenticated connections.", + "required": [ + "type" + ], + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "Unauthenticated can be used for services that do not need authentication.\n", + "enum": [ + "UNAUTHENTICATED" + ] + } + } + }, + "GroupListResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Group" + }, + "description": "Contains list of authentication groups.\n" + } + } + }, + "Group": { + "type": "object", + "required": [ 
+ "metadata", + "desiredState" + ], + "properties": { + "metadata": { + "$ref": "#/components/schemas/ResourceMeta" + }, + "desiredState": { + "$ref": "#/components/schemas/GroupDef" + }, + "currentStatus": { + "$ref": "#/components/schemas/GroupDef" + } + } + }, + "GroupDef": { + "type": "object", + "required": [ + "roles" + ], + "description": "An authentication group (or \"AuthN Group\") is a collection of Roles. All AuthN Groups are globally unique. You can assign AuthN Groups to external authentication provider properties (such as an AD Group).\n", + "properties": { + "roles": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/components/schemas/ResourceRef" + } + } + } + } + }, + "examples": { + "AWSRequest": { + "value": { + "metadata": { + "name": "my-aws-integration", + "description": "AWS integration for us-west-1", + "displayName": "UsWest1-Integration", + "tags": [ + "us-west-1" + ] + }, + "desiredState": { + "type": "AWS_INTEGRATION", + "endpointUri": "https://ec2.us-west-1.amazonaws.com", + "credential": { + "type": "AWS_ACCESS_KEY", + "accessKeyID": "AKIAJ3N2PZUMZE3Y67ZA", + "secretAccessKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY)" + } + } + } + }, + "AWSResponse": { + "value": { + "metadata": { + "name": "my-aws-integration", + "description": "AWS integration for us-west-1", + "displayName": "UsWest1-Integration", + "tags": [ + "us-west-1" + ], + "uid": "619887de-a748-4931-853d-c6b706f95ddf", + "createTime": "2019-09-18T16:42:15.1183523Z", + "updateTime": "2019-09-18T16:42:15.1183523Z" + }, + "desiredState": { + "type": "AWS_INTEGRATION", + "endpointUri": "https://ec2.us-west-1.amazonaws.com", + "credential": { + "type": "AWS_ACCESS_KEY", + "accessKeyID": "AKIAJ3N2PZUMZE3Y67ZA", + "secretAccessKey": "*********" + } + }, + "currentStatus": { + "type": "AWS_INTEGRATION", + "endpointUri": "https://ec2.us-west-1.amazonaws.com", + "credential": { + "type": "AWS_ACCESS_KEY", + "accessKeyID": "AKIAJ3N2PZUMZE3Y67ZA", + "secretAccessKey": 
"*********" + } + } + } + }, + "SplunkRequest": { + "value": { + "metadata": { + "name": "my-splunk-integration", + "description": "Splunk integration", + "displayName": "splunk-Integration", + "tags": [ + "dev" + ] + }, + "desiredState": { + "type": "GENERIC_INTEGRATION", + "endpointUri": "https://splunk.com:8080/services/collector", + "credential": { + "type": "USER_PASS", + "user": "x", + "password": "********" + } + } + } + }, + "SplunkResponse": { + "value": { + "metadata": { + "name": "my-splunk-integration", + "description": "Splunk integration", + "displayName": "splunk-Integration", + "tags": [ + "dev" + ], + "uid": "619887de-a748-4931-853d-c6b706f95ddf", + "createTime": "2019-09-18T16:42:15.1183523Z", + "updateTime": "2019-09-18T16:42:15.1183523Z" + }, + "desiredState": { + "type": "GENERIC_INTEGRATION", + "endpointUri": "https://splunk.com:8080/services/collector", + "credential": { + "type": "USER_PASS", + "user": "x", + "password": "*********" + } + }, + "currentStatus": { + "type": "GENERIC_INTEGRATION", + "endpointUri": "https://splunk.com:8080/services/collector", + "credential": { + "type": "USER_PASS", + "user": "x", + "password": "*********" + } + } + } + }, + "AWSRequestForPatch": { + "value": { + "metadata": { + "name": "my-aws-integration", + "description": "AWS integration for us-west-1", + "displayName": "UsWest1-Integration", + "tags": [ + "us-west-1" + ] + }, + "desiredState": { + "type": "AWS_INTEGRATION", + "credential": { + "type": "AWS_ACCESS_KEY", + "accessKeyID": "AKIAJ3N2PZUMZE3Y67ZA", + "secretAccessKey": "*********" + } + } + } + }, + "AWSResponseForPatch": { + "value": { + "metadata": { + "name": "my-aws-integration", + "description": "AWS integration for us-west-1", + "displayName": "UsWest1-Integration", + "tags": [ + "us-west-1" + ], + "uid": "619887de-a748-4931-853d-c6b706f95ddf", + "createTime": "2019-09-18T16:42:15.1183523Z", + "updateTime": "2019-09-18T16:42:15.1183523Z" + }, + "desiredState": { + "type": 
"AWS_INTEGRATION", + "endpointUri": "https://ec2.us-west-1.amazonaws.com", + "credential": { + "type": "AWS_ACCESS_KEY", + "accessKeyID": "AKIAJ3N2PZUMZE3Y67ZA", + "secretAccessKey": "*********" + } + }, + "currentStatus": { + "type": "AWS_INTEGRATION", + "endpointUri": "https://ec2.us-west-1.amazonaws.com", + "credential": { + "type": "AWS_ACCESS_KEY", + "accessKeyID": "AKIAJ3N2PZUMZE3Y67ZA", + "secretAccessKey": "*********" + } + } + } + }, + "SplunkRequestForPatch": { + "value": { + "metadata": { + "name": "my-splunk-integration", + "description": "Splunk integration", + "displayName": "splunk-Integration", + "tags": [ + "dev" + ] + }, + "desiredState": { + "type": "GENERIC_INTEGRATION", + "credential": { + "type": "USER_PASS", + "user": "x", + "password": "********" + } + } + } + }, + "SplunkResponseForPatch": { + "value": { + "metadata": { + "name": "my-splunk-integration", + "description": "Splunk integration", + "displayName": "splunk-Integration", + "tags": [ + "dev" + ], + "uid": "619887de-a748-4931-853d-c6b706f95ddf", + "createTime": "2019-09-18T16:42:15.1183523Z", + "updateTime": "2019-09-18T16:42:15.1183523Z" + }, + "desiredState": { + "type": "GENERIC_INTEGRATION", + "endpointUri": "https://splunk.com:8080/services/collector", + "credential": { + "type": "USER_PASS", + "user": "x", + "password": "*********" + } + }, + "currentStatus": { + "type": "GENERIC_INTEGRATION", + "endpointUri": "https://splunk.com:8080/services/collector", + "credential": { + "type": "USER_PASS", + "user": "x", + "password": "*********" + } + } + } + }, + "ListIntegrationResponse": { + "value": { + "items": [ + { + "metadata": { + "name": "my-aws-integration", + "description": "AWS integration for us-west-1", + "displayName": "UsWest1-Integration", + "tags": [ + "us-west-1" + ], + "uid": "619887de-a748-4931-853d-c6b706f95ddf", + "createTime": "2019-09-18T16:42:15.1183523Z", + "updateTime": "2019-09-18T16:42:15.1183523Z" + }, + "desiredState": { + "type": "AWS_INTEGRATION", + 
"endpointUri": "https://ec2.us-west-1.amazonaws.com", + "credential": { + "type": "AWS_ACCESS_KEY", + "accessKeyID": "AKIAJ3N2PZUMZE3Y67ZA", + "secretAccessKey": "*********" + } + }, + "currentStatus": { + "type": "AWS_INTEGRATION", + "endpointUri": "https://ec2.us-west-1.amazonaws.com", + "credential": { + "type": "AWS_ACCESS_KEY", + "accessKeyID": "AKIAJ3N2PZUMZE3Y67ZA", + "secretAccessKey": "*********" + } + } + }, + { + "metadata": { + "name": "my-splunk-integration", + "description": "Splunk integration", + "displayName": "splunk-Integration", + "tags": [ + "dev" + ], + "uid": "619887de-a748-4931-853d-c6b706f95ddf", + "createTime": "2019-09-18T16:42:15.1183523Z", + "updateTime": "2019-09-18T16:42:15.1183523Z" + }, + "desiredState": { + "type": "GENERIC_INTEGRATION", + "endpointUri": "https://splunk.com:8080/services/collector", + "credential": { + "type": "USER_PASS", + "user": "x", + "password": "*****" + } + }, + "currentStatus": { + "type": "GENERIC_INTEGRATION", + "endpointUri": "https://splunk.com:8080/services/collector", + "credential": { + "type": "USER_PASS", + "user": "x", + "password": "*****" + } + } + } + ] + } + }, + "NotFoundError": { + "value": { + "code": 120003, + "message": "Error getting the Integration resource: the specified integration does not exist. Check the name of the Integration resource, then try again." + } + }, + "BadRequestError": { + "value": { + "code": 120004, + "message": "Error creating the Integration resource: could not parse the request payload. Check the format of the request, then try again." + } + }, + "AlreadyExistsError": { + "value": { + "code": 120007, + "message": "Error creating the Integration resource: the integration already exists. Use a unique name for the Integration resource, then try again." 
+ } + } + } + } +} diff --git a/content/controller/app-delivery/_index.md b/content/controller/app-delivery/_index.md new file mode 100644 index 000000000..e6ae5aeea --- /dev/null +++ b/content/controller/app-delivery/_index.md @@ -0,0 +1,11 @@ +--- +aliases: +- /services/apps/_index.md +description: Tasks for deploying and managing your applications. +menu: + docs: + parent: NGINX Controller +title: Application Delivery +weight: 152 +url: /nginx-controller/app-delivery/ +--- diff --git a/content/controller/app-delivery/about-app-delivery.md b/content/controller/app-delivery/about-app-delivery.md new file mode 100644 index 000000000..b87308017 --- /dev/null +++ b/content/controller/app-delivery/about-app-delivery.md @@ -0,0 +1,43 @@ +--- +description: Learn about F5 NGINX Controller Application Delivery concepts. +docs: DOCS-474 +title: About Application Delivery +toc: true +weight: 100 +--- + +## Apps + +In F5 NGINX Controller, an App serves as a container for one or more Components. Components represent the backend services that comprise your application. Together, an App and its Components represent the logical partitioning of your application into its composite parts. For example, a Component might correspond to a particular microservice within your application. Each Component you add to an App represents one or more paths via which traffic can reach that microservice. + +All Apps and Components live within an [Environment]({{< relref "/controller/services/manage-environments.md" >}}). This means that in order to have access to a particular App, a User needs to have permission to access its Environment. If you need access to an Environment or App, contact your administrator. + +## Components + +A Component is a child object of an App. Components let you partition an App into smaller, self-contained pieces that are each responsible for a particular function of the overall application. 
For example, a Component could correspond to a microservice that, together with several other microservices, comprises a complete application. + +Each Component contains an ingress definition that includes the fully-qualified domain names (FQDNs) and URIs from clients. These ingress definitions associate incoming requests with a particular path; the certificates that are used for decryption/encryption of HTTPS requests and responses that traverse that path; the backend servers that host the App to which the path delivers the requests; and the rewrites, redirects, and modifications on the requests/responses that occur along the path. + +Components can be instantiated on multiple paths corresponding to the placements associated with the Component; these placements are defined within the [Gateway(s)]({{< relref "/controller/services/manage-gateways.md" >}}) referenced in the Component. + +## Inherited or Independent Resources + +When you configure a Component, you can choose to: + +- inherit resources and configurations from the Gateway; +- create and define new resources and configurations specific to the Component; or +- use a combination of inherited and Component-specific configurations. + +For example, a Gateway's ingress definition might include the URIs for a Service's FQDN(s) and the associated TLS [certificates]({{< relref "/controller/services/manage-certs.md" >}}), while the Component's ingress definition would contain relative URIs for the FQDN defined in the Gateway: + +- Gateway Ingress URIs: `www.example.com` +- Component Ingress URIs: `/about/`, `/docs/`, `/contact/` + +Together, the Component's relative paths and the Gateway's FQDN results form the absolute URI for each path (`www.example.com/about/`, `www.example.com/docs/`, and `www.example.com/contact/`). + +Likewise, you can configure a Component with its own FQDN and paths, but inherit the TLS certificates from the Gateway. 
Or, you can configure a Component that doesn't inherit any resources or configurations from the Gateway and uses its own set of definitions. + +{{< note >}}The ability to add resources, like Certificates, is determined by your account permissions. If you don't have the ability to add new Certs, contact your administrator. {{< /note >}} + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/app-delivery/about-caching.md b/content/controller/app-delivery/about-caching.md new file mode 100644 index 000000000..4a8212ccb --- /dev/null +++ b/content/controller/app-delivery/about-caching.md @@ -0,0 +1,389 @@ +--- +description: Learn how F5 NGINX Controller handles caching configurations and what NGINX + cache directives are supported. +docs: DOCS-339 +doctypes: +- concept +tags: +- docs +title: About Caching +toc: true +weight: 200 +--- + +## Overview + +The F5 NGINX Controller Application Delivery (AD) module lets you configure content caching by using either the user interface (UI) or the [Components REST API](https://docs.nginx.com/nginx-controller/api/ctlr-adc-api/#tag/Components). + +## Basic Caching + +NGINX Controller Caching supports [basic caching](https://www.nginx.com/blog/nginx-caching-guide/#How-to-Set-Up-and-Configure-Basic-Caching) via the *disk store* resource. + +When you add a disk store to a component, you define the location of the cache on the hard disk. The path you specify for the disk store is the base path under which you want to store the cache files for the component. + +{{< important >}} +The directory that you want to use as the cache must already exist and the NGINX process must have read and write permissions to it. Otherwise, NGINX Controller can't create the cached folders and files. + +If NGINX Controller can't create the desired cache directory and/or write files to it, the user interface will display an error for the component. 
+{{< /important >}} + +When you use the UI or the REST API to create a single disk store, NGINX Controller adds the following directives to the auto-generated `nginx.conf` file: + +- [`proxy_cache_path`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path), in the top-level `http` context; +- [`proxy_cache`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache), added to the component's `location` block. + +You can include NGINX Controller Caching data when creating [custom dashboards]({{< relref "/controller/analytics/dashboards/custom-dashboards" >}}) and [alerts]({{< relref "/controller/analytics/alerts/manage-alerts" >}}) for your applications. + +## Cache Splitting + +NGINX Controller Caching also supports splitting the cache across multiple directories, which can reside on different hard drives. To split the cache, you need to create a disk store for each desired cache location. The Caching *split config* settings let you determine how NGINX Controller should split the data between the disk stores -- either by percentage or by pattern matching. + +The percentage option lets you set the percentage of the cache to store in each location. Pattern matching lets you define where to store cache contents -- like certain file types -- and which cache location should send a response based on the request. + +{{< see-also >}} +Read the [NGINX Caching Guide](https://www.nginx.com/blog/nginx-caching-guide/#Splitting-the-Cache-Across-Multiple-Hard-Drives) to learn more about splitting the cache across multiple hard drives. +{{< /see-also >}} + +When you define a split cache, NGINX Controller adds a `split_clients` configuration block with percentage split or a `map` configuration block with string split to the `http` context of the generated `nginx.conf` file. + +## Advanced Caching + +As noted earlier in this topic, you can use Caching to manage basic caching use cases. 
+To add any of the [`ngx_http_proxy_module`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html) cache directives listed below, use NGINX Controller **Snippets**. + +- [`proxy_cache_background_update`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_background_update) +- [`proxy_cache_bypass`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_bypass) +- [`proxy_cache_convert_head`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_convert_head) +- [`proxy_cache_key`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_key) +- [`proxy_cache_lock`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock) +- [`proxy_cache_lock_age`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock_age) +- [`proxy_cache_lock_timeout`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock_timeout) +- [`proxy_cache_max_range_offset`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_max_range_offset) +- [`proxy_cache_methods`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_methods) +- [`proxy_cache_min_uses`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_min_uses) +- [`proxy_cache_purge`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_purge) +- [`proxy_cache_revalidate`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_revalidate) +- [`proxy_cache_use_stale`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_use_stale) +- [`proxy_cache_valid`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_valid) +- [`proxy_no_cache`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_no_cache) +- [`proxy_temp_path`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_temp_path) + +In order to enable the collection of app centric caching metrics, NGINX Controller has added a 
minimal set of APIs to enable and control caching. For more advanced caching features, you can make use of `configSnippets` to configure the directives above. + +{{< note >}} +When you enable the temporary path for disk store with `tempPath:ENABLED`, you need to set the temporary path `proxy_temp_path` using the snippets API. +{{< /note >}} + + +{{< note >}} +NGINX Controller does not collect or report metrics for directives configured using Snippets. +{{< /note >}} + +## Usage Examples + +Each of the examples provided here shows a sample API request and the resulting NGINX config file. These examples are for learning purposes only and are not intended for use in production settings. + +### Basic Caching {#basic-caching-example} + +The example below shows an excerpt of a REST API request that sets up basic caching. This example defines one server as the cache location. + +```json +"desiredState": { + "caching": { + "diskStores": [ + { + "path": "/tmp/cache-1", + "maxSize": "5G", + "minFree": "10k", + "inMemoryStoreSize": "500M", + "inactiveTime": "2s" + } + ] + } +} +``` + +The above request modifies the NGINX Controller-generated `nginx.conf` file as follows: + +- Adds a `proxy_cache_path` directive for the disk store to the `http` context; +- Adds a new `proxy_cache` directive to the `location` block for the component. 
+ +```Nginx configuration file {hl_lines=[1,14]} +proxy_cache_path /tmp/cache-1/app_centric_example-env|example-app-1|example-app-component| max_size=5G min_free=10k keys_zone=app_centric_example-env|example-app-1|example-app-component|/tmp/cache-1:500M purger=off; + +server { + server_name test.example.com; + listen 80; + status_zone server_5ae404e8-005d-38e8-b355-6d54cb219730; + set $f5_gateway example-gw; + f5_metrics_marker gateway $f5_gateway; + set $f5_environment example-env; + f5_metrics_marker environment $f5_environment; + location / { + error_log /dev/null; + access_log off; + proxy_cache app_centric_example-env|example-app-1|example-app-component|/tmp/cache-1; + set $f5_app example-app-1; + f5_metrics_marker app $f5_app; + set $f5_component example-app-component; + f5_metrics_marker component $f5_component; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header Host $host; + proxy_set_header Connection ''; + proxy_http_version 1.1; + proxy_pass http://wg-example_http_b4859463-b3bd-4ccb-8442-e21253a50da7; + } +} +``` + +### Cache Splitting using Percentage and Snippets {#split-percentage-example} + +You can set up cache splitting using the Percentage criteria to define the percent of the cache to store in each location. + +The example request excerpt below does the following: + +- splits the cache across three different storage paths; +- sets one of the stores -- `/tmp/default` -- as the default; +- uses the Component `configSnippets.uriSnippets` API to configure the [`add_header`](https://nginx.org/en/docs/http/ngx_http_headers_module.html#add_header) directive, to include `Cache` header with "HIT/MISS/EXPIRED/BYPASS" in the response; +- uses the Component `configSnippets.uriSnippets` API to set a cache duration time of 1m for all requests using [`proxy_cache_valid`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_valid). 
+ +```json +{ + "desiredState": { + "configSnippets": { + "uriSnippets": [ + { + "directives": [ + { + "directive": "proxy_cache_valid", + "args": [ + "any", + "1m" + ] + } + ] + } + ] + }, + "programmability": { + "responseHeaderModifications": [ + { + "action": "ADD", + "headerName": "X-Cache-Status", + "headerValue": "$upstream_cache_status" + } + ] + }, + "caching": { + "splitConfig": { + "criteriaType": "PERCENTAGE", + "key": "$request_uri" + }, + "diskStores": [ + { + "inMemoryStoreSize": "100m", + "inactiveTime": "1m", + "isDefault": false, + "maxSize": "5G", + "minFree": "10k", + "path": "/tmp/hdd1", + "percentCriteria": "20%" + }, + { + "inMemoryStoreSize": "100m", + "inactiveTime": "10s", + "isDefault": false, + "maxSize": "5g", + "minFree": "10k", + "path": "/tmp/hdd2", + "percentCriteria": "50%" + }, + { + "inMemoryStoreSize": "100m", + "inactiveTime": "15s", + "isDefault": true, + "maxSize": "2g", + "minFree": "10k", + "path": "/tmp/default" + } + ] + } + } +} +``` + +The above request modifies the `nginx.conf` file as follows: + +- Adds the `split_clients` directive to the `http` context, reflecting the criteria defined for `diskStores`; +- Adds a `proxy_cache_path` directive for each disk store to the `http` context; +- Adds a new `proxy_cache` variable -- `$cache_` -- to the `location` block for the component; +- Adds the `proxy_cache_valid` and `add_header` directives to the `location` block for the component. 
+ +```Nginx configuration file {hl_lines=["1-8",27,36,37]} +split_clients $request_uri $cache_bdfa5d91f97d37dbb97a42dde6a5f4ff { + 20% app_centric_env|app|split_cache_percentage|/tmp/hdd1; + 50% app_centric_env|app|split_cache_percentage|/tmp/hdd2; + * app_centric_env|app|split_cache_percentage|/tmp/default; +} +proxy_cache_path /tmp/hdd1/app_centric_env|app|split_cache_percentage| max_size=5G min_free=10k keys_zone=app_centric_env|app|split_cache_percentage|/tmp/hdd1: 100m purger=off inactive=1m; +proxy_cache_path /tmp/hdd2/app_centric_env|app|split_cache_percentage| max_size=5g min_free=10k keys_zone=app_centric_env|app|split_cache_percentage|/tmp/hdd2: 100m purger=off inactive=10s; +proxy_cache_path /tmp/default/app_centric_env|app|split_cache_percentage| max_size=2g min_free=10k keys_zone=app_centric_env|app|split_cache_percentage|/tmp/default: 100m purger=off inactive=15s; +upstream split_p_http_7ec84d9e-373e-4d90-bcaa-0e33dcc4b906 { + zone split_p_http_7ec84d9e-373e-4d90-bcaa-0e33dcc4b906 160k; + server 10.146.187.154: 80; + keepalive 64; + keepalive_requests 100; + keepalive_timeout 60s; +} +server { + server_name test.example.com; + listen 80 reuseport; + status_zone server_4d1ee345-cf08-354e-93dc-1c3a844a04e3; + set $f5_gateway gw; + f5_metrics_marker gateway $f5_gateway; + set $f5_environment env; + f5_metrics_marker environment $f5_environment; + location /aaa { + error_log /dev/null; + access_log off; + proxy_cache $cache_bdfa5d91f97d37dbb97a42dde6a5f4ff; + set $f5_app app; + f5_metrics_marker app $f5_app; + set $f5_component split_cache_percentage; + f5_metrics_marker component $f5_component; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header Host $host; + proxy_set_header Connection ''; + proxy_http_version 1.1; + add_header Cache $upstream_cache_status; + proxy_cache_valid any 1m; + proxy_pass http: //split_p_http_7ec84d9e-373e-4d90-bcaa-0e33dcc4b906; +} +``` + +### Cache Splitting using Pattern Matching and Snippets 
{#split-string-example} + +You can also use pattern matching to cache based on a certain string (`stringCriteria`) for each store. For example, you can define the string criteria as a list of file formats, as shown in the request excerpt below. As in the [percentage example](#split-percentage-example), we're also using the Components `configSnippets` API here to set the `add_header` and `proxy_cache_valid` directives. + +The request below splits the cache into three different stores. + +- One store is the default location and has no string criteria defined. +- One store is the location for all `.html`files. +- Ones store is the location for all `.mp4` files. + +```json +"desiredState": { + "configSnippets": { + "uriSnippets": [ + { + "directives": [ + { + "directive": "proxy_cache_valid", + "args": [ + "any", + "1m" + ] + } + ] + } + ] + }, + "programmability": { + "responseHeaderModifications": [ + { + "action": "ADD", + "headerName": "X-Cache-Status", + "headerValue": "$upstream_cache_status" + } + ] + }, + "caching": { + "splitConfig": { + "criteriaType": "STRING", + "key": "$request_uri" + }, + "diskStores": [ + { + "inMemoryStoreSize": "10m", + "inactiveTime": "1m", + "isDefault": false, + "maxSize": "2G", + "minFree": "1m", + "path": "/tmp/hdd1", + "stringCriteria": ["~.html$"] + }, + { + "inMemoryStoreSize": "50m", + "inactiveTime": "1m", + "isDefault": false, + "maxSize": "1g", + "minFree": "10k", + "path": "/tmp/hdd2", + "stringCriteria": ["~.mp4$"] + }, + { + "inMemoryStoreSize": "30m", + "inactiveTime": "1m", + "isDefault": true, + "maxSize": "2g", + "minFree": "10k", + "path": "/tmp/default" + } + ] + } +} +``` + +The above request modifies the `nginx.conf` file as follows: + +- Adds a `map` directive to the `http` context, reflecting the string criteria defined for the disk stores. +- Adds a `proxy_cache_path` directive to the `http` context for each disk store. +- Adds a new variable `$cache_` to the `location` block for the component. 
+ +```Nginx configuration file {hl_lines=["1-8",30,39,40]} +map $request_uri $cache_8de5273e13f731e283acbc999760c3e3 { + ~.html$ app_centric_env|app|split_string|/tmp/hdd1; + ~.mp4$ app_centric_env|app|split_string|/tmp/hdd2; + default app_centric_env|app|split_string|/tmp/default; +} +proxy_cache_path /tmp/hdd1/app_centric_env|app|split_string| max_size=2G min_free=1m keys_zone=app_centric_env|app|split_string|/tmp/hdd1:10m purger=off inactive=1m; +proxy_cache_path /tmp/hdd2/app_centric_env|app|split_string| max_size=1g min_free=10k keys_zone=app_centric_env|app|split_string|/tmp/hdd2:50m purger=off inactive=1m; +proxy_cache_path /tmp/default/app_centric_env|app|split_string| max_size=2g min_free=10k keys_zone=app_centric_env|app|split_string|/tmp/default:30m purger=off inactive=1m; +upstream wg_http_0ace772a-0c68-4d01-a443-6e377d4f6133 { + zone wg_http_0ace772a-0c68-4d01-a443-6e377d4f6133 160k; + server 10.146.187.154:80; + keepalive 64; + keepalive_requests 100; + keepalive_timeout 60s; +} +map $host $f5_published_api { + default -; +} +server { + server_name test.example.com; + listen 80 reuseport; + status_zone server_4d1ee345-cf08-354e-93dc-1c3a844a04e3; + set $f5_gateway gw; + f5_metrics_marker gateway $f5_gateway; + set $f5_environment env; + f5_metrics_marker environment $f5_environment; + location / { + error_log /dev/null; + access_log off; + proxy_cache $cache_8de5273e13f731e283acbc999760c3e3; + set $f5_app app; + f5_metrics_marker app $f5_app; + set $f5_component split_string; + f5_metrics_marker component $f5_component; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header Host $host; + proxy_set_header Connection ''; + proxy_http_version 1.1; + add_header Cache $upstream_cache_status; + proxy_cache_valid any 1m; + proxy_pass + } +} +``` + +{{< versions "3.22" "latest" "adcvers" >}} diff --git a/content/controller/app-delivery/about-snippets.md b/content/controller/app-delivery/about-snippets.md new file mode 100644 index 
000000000..da03de377 --- /dev/null +++ b/content/controller/app-delivery/about-snippets.md @@ -0,0 +1,566 @@ +--- +docs: DOCS-340 +doctypes: +- concept +tags: +- docs +title: About Snippets +toc: true +weight: 300 +--- + +## Overview + +The F5 NGINX Controller Application Delivery (AD) module lets you configure NGINX directives that aren't represented in the NGINX Controller API via "config snippets", or "Snippets". You can do so by using either the user interface (UI) or the [Application Delivery REST API](https://docs.nginx.com/nginx-controller/api/ctlr-adc-api/). + +{{< caution >}} +When you use Snippets to customize your NGINX configuration, your changes are applied to the `nginx.conf` file *as is*. NGINX Controller does not verify that your configuration is valid before applying the snippet. + +We strongly recommend verifying Snippets in a lab environment before making any changes in production. +{{< /caution >}} + +## Types of Snippets + +There are five types of Snippets, which you can configure for gateways or components. This lets you add custom directives into the corresponding NGINX configuration blocks generated by the gateways and components for the associated URIs. + +{{< note >}}The `uriSnippets` can't be used for TCP/UDP components.{{< /note >}} + +{{}} + +| Snippet | Description | Corresponding API Endpoint | +| ----------------------- | ------------------------------------------------------------------ | -------------------------- | +| `httpSnippet` | Adds directives to the `http` block. | Gateway | +| `mainSnippet` | Adds directives to the `main` block. | Gateway | +| `streamSnippet` | Adds directives to the `stream` block. | Gateway | +| `uriSnippets` | Adds directives to the component's `server` and `location` blocks. | Component | +| `uriSnippets` | Adds directives to the gateway's `server` blocks. | Gateway | +| `workloadGroupSnippets` | Adds directives to the `upstream` blocks. 
| Component | + +{{}} + +## Best Practices + +### Gateway Partitions + +It's important to avoid adding conflicting snippets to the same [context](https://docs.nginx.com/nginx/admin-guide/basic-functionality/managing-configuration-files/#contexts) in your NGINX configuration file. We recommend that you create one stand-alone Gateway to hold the `main`, `http`, and `stream` snippets. Doing so lets you share the configuration for these contexts across Gateways that define the URIs (`server` blocks) for particular instances while reducing the risk of duplicate or conflicting settings. + +### NGINX Variables + +NGINX configurations commonly use [NGINX variables](https://nginx.org/en/docs/varindex.html) or custom variables. If you prefer to configure NGINX Controller by using the REST API, you may run into problems with variable expansion when sending JSON as part of a `curl` request using th `-d` flag. The recommended best practice for this is to reference the JSON in a data file instead of sending the string as part of the request. An alternative is to redefine the variable to itself, which allows the variable to pass through to the NGINX configuration. If you're using the NGINX `$host` variable in your JSON data -- represented by the `` placeholder in the example below -- you would define the variable before the curl request as follows: + +```none +host='$host' curl -s -k -H "Content-Type: application/json" -X PUT -d "" https://192.168.100.10:80/ +``` + +## Usage Examples + +{{< caution >}} +The examples provided here are intended for demonstration purposes only. +We strongly recommend verifying Snippets in a lab environment before making any changes in production. +{{< /caution >}} + +### Add HTTP Strict Transport Security Headers + +If you want to implement a [HTTP Strict Transport Security](https://www.nginx.com/blog/http-strict-transport-security-hsts-and-nginx/) (HSTS) policy, you can add a snippet to your gateway. 
+For example: + +```json +{ + "metadata": { + "name": "" + }, + "desiredState": { + "configSnippets": { + "uriSnippets": [ + { + "applicableUris": [ + { + "uri": "http://172.16.0.238:81" + } + ], + "directives": [ + { + "directive": "add_header", + "args": ["Strict-Transport-Security", "max-age=31536000; includeSubDomains", "always"] + } + ] + } + ] + }, + "ingress": { + "uris": { + "http://example.com:8020": {} + }, + "placement": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/unspecified/instances/" + } + ] + } + } + } +} +``` + +### Allow or Deny IP Addresses + +You can add IP addresses to your allow- or deny-list by using the `allow` or `deny` directives in a gateway snippet. For example: + +```json +{ + "metadata": { + "name": "" + }, + "desiredState": { + "configSnippets": { + "uriSnippets": [ + { + "applicableUris": [ + { + "uri": "" + } + ], + "directives": [ + { + "directive": "deny", + "args": ["192.0.2.2"] + }, + { + "directive": "allow", + "args": ["192.0.2.1/24"] + }, + { + "directive": "allow", + "args": ["2001:0db8::/32"] + }, + { + "directive": "deny", + "args": ["all"] + } + ] + } + ] + }, + "ingress": { + "uris": { + "http://example.com:8020": {} + }, + "placement": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/unspecified/instances/" + } + ] + } + } + } +} +``` + + +### Load the NGINX Prometheus Module + +In order to use the [NGINX Prometheus-njs](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/prometheus-njs/) module with NGINX Controller, you need to use`load_module` in the `main` context, `js_import` in the `http` context, and `js_content` in the `location`. NGINX Controller automatically enables the location api `location /api`, which is also required for metrics reporting. + +After installing the module, add the following Snippets to your gateway. 
This will add `load_module` and `js_import`: + +```json +{ + "metadata": { + "name": "" + }, + "desiredState": { + "configSnippets": { + "mainSnippet": { + "directives": [ + { + "directive": "load_module", + "args": ["modules/ngx_http_js_module.so"] + } + ] + }, + "httpSnippet":{ + "directives": [ + { + "directive": "js_import", + "args": ["/usr/share/nginx-plus-module-prometheus/prometheus.js"] + } + ] + } + }, + "ingress": { + "uris": { + "http://example.com:8020": {} + }, + "placement": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/unspecified/instances/" + } + ] + } + } + } +} + +``` + +Then, you'd add a config snippet similar to the example below to your component. + +```json +{ + "metadata": { + "name": "" + }, + "desiredState": { + "configSnippets": { + "uriSnippets": [ + { + "applicableUris": [ + { + "uri": "/metrics" + } + ], + "directives": [ + { + "directive":"js_content", + "args": ["prometheus.metrics"] + } + ] + } + ] + }, + "ingress": { + "uris": { + "http://example.com:8020": {} + }, + "placement": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/unspecified/instances/" + } + ] + } + } + } +} +``` + + +### NGINX as a WebSocket Proxy + +If you want to use NGINX Controller to configure [NGINX as a WebSocket Proxy](https://www.nginx.com/blog/websocket-nginx/), you can customize your `nginx.conf` by using Snippets and header programmability. 
+ +In the gateway, provide an `http` snippet that defines the `map` directive and the `server` configuration: + +```json +{ + "metadata": { + "name": "" + }, + "desiredState": { + "configSnippets": { + "httpSnippet": { + "directives": [ + { + "directive": "map", + "args": ["$http_upgrade", "$connection_upgrade"], + "block": [ + { + "directive": "default", + "args": ["upgrade"] + }, + { + "directive": "''", + "args": ["close"] + } + ] + } + ] + } + }, + "ingress": { + "uris": { + "http://example.com:8020": {} + }, + "placement": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/unspecified/instances/" + } + ] + } + } + } +} +``` + +Then, add the two required headers to the component using `requestHeaderModifications`. For example: + +```json +{ + "metadata": { + "name": "", + }, + "desiredState": { + "ingress": { + "uris": { + "/": {} + }, + "gatewayRefs": [ + {"ref": "/services/environments/${env}/gateways/"} + ] + }, + "programmability": { + "requestHeaderModifications": [ + { + "action": "ADD", + "headerName": "Upgrade", + "headerValue": "$http_upgrade" + }, + { + "action": "ADD", + "headerName": "Connection", + "headerValue": "$connection_upgrade" + } + ] + }, + "backend": { + "workloadGroups": { + "websocket": { + "uris": { + "http://:8010": {} + } + } + } + } + } +} +``` + +### Forward Errors Logs to Remote Syslog + +If you want to forward HTML error logs to [syslog](https://nginx.org/en/docs/syslog.html), you can add the `error_log` directive snippet to your gateway. 
+For example: + +```json +{ + "metadata": { + "name": "" + }, + "desiredState": { + "configSnippets": { + "httpSnippet": { + "directives": [ + { + "directive": "error_log", + "args": ["syslog:server=", "debug"] + } + ] + } + }, + "ingress": { + "uris": { + "http://example.com:8000": {} + }, + "placement": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/unspecified/instances/" + } + ] + } + } + } +} +``` + +{{< note >}} +The `error_log` and `accesslog` directives can appear at various block levels (`main`, `http`, `stream`, `server`, `location`, etc.). +NGINX Controller adds these directives to control logging to the local file. When using Snippets to add additional logging capabilities, the inner blocks override the outer block definitions. +For example, if you enable remote logging for errors at the `main` level, and you add an `error_log` directive to a `server` or `location` block that uses local logging, the local logging configuration overrides the remote logging configured at the `main` level. +{{< /note >}} + +### Manage IPv6 Addresses + +You can use Snippets to manage IPv6 addresses for HTTP and TCP/UDP use cases. IPv6 address management is supported in both Gateway and Component Snippets. + +- Be sure to set the `reuseport` option for all IPv6 listen directives. Failure to do so can cause bind errors. +- NGINX Controller's post-processing logic removes the `reuseport` option in certain cases. This is a [known issue]({{< relref "/controller/releases/adc/adc-release-notes-3.22.md" >}}) when the IPv6 port matches an IPv4 port and the IPv4 listen directive does not specify an IP address (in other words, a wildcard IP). To change the IPv6 listen directive's IP address, remove the Snippet, then re-add the Snippet with a new IPv6 address. + +If you need to support IPv6 addresses for the NGINX listen directive, you can use a snippet similar to the ones shown below to achieve it. 
+ +#### HTTP Gateway with IPv6 + +For HTTP, use the Gateway URI Snippets block to add an IPv6 [`listen`](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen) directive to the `server` blocks. + +```json +{ + "metadata": { + "name": "" + }, + "desiredState": { + "configSnippets": { + "uriSnippets": [ + { + "directives": [ + { + "directive": "listen", + "args": [ + "[::]:80", + "reuseport" + ] + }, + { + "directive": "listen", + "args": [ + "[]:80", + "reuseport" + ] + } + ] + } + ] + }, + "ingress": { + "placement": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/unspecified/instances/" + } + ] + }, + "uris": { + "http://example.com:80": {} + } + } + } +} +``` + +{{< note >}}You cannot add IPv6 `listen` directives to a server block when the FQDN is defined in the Component URI (for example, `http://{FQDN}/{PATH}`). {{< /note >}} + +#### TCP/UDP Component with IPv6 + +For TCP/UDP, use the Component URI Snippets block to add an IPv6 [`listen`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#listen) directive to the `server` blocks. 
+ +##### TCP Component + +```json +{ + "metadata": { + "name": "" + }, + "desiredState": { + "configSnippets": { + "uriSnippets": [ + { + "directives": [ + { + "directive": "listen", + "args": [ + "[::]:9090", + "reuseport" + ] + } + ] + } + ] + }, + "backend": { + "workloadGroups": { + "wg": { + "uris": { + "tcp://:9090": {} + }, + } + } + }, + "componentType": "TCPUDP", + "ingress": { + "gatewayRefs": [ + { + "ref": "/services/environments//gateways/" + } + ], + "uris": { + "tcp://*:9090": {} + } + } + } +} +``` + +##### UDP Component + +```json +{ + "metadata": { + "name": "" + }, + "desiredState": { + "configSnippets": { + "uriSnippets": [ + { + "directives": [ + { + "directive": "listen", + "args": [ + "[]:9053", + "udp", + "reuseport" + ] + } + ] + } + ] + }, + "backend": { + "workloadGroups": { + "wg": { + "uris": { + "udp://:9053": {} + } + } + } + }, + "componentType": "TCPUDP", + "ingress": { + "gatewayRefs": [ + { + "ref": "/services/environments//gateways/" + } + ], + "uris": { + "udp://*:9053": {} + } + } + } +} +``` + +#### IPv6-only Server Block + +To add an IPv6-only `server` block, define the entire block in the Gateway HTTP or the Stream Snippets block. + +#### UI Config + +Add `listen` directives with parameters in URI Snippets. To learn more about what the `listen` directive does and what parameters it accepts, refer to the following topics: + +- [`stream` listen options](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#listen) +- [`http` listen options](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen) + +{{< note >}} + +The `reuseport` parameter creates an individual listening socket for each worker process. See [`reuseport` option](https://nginx.org/en/docs/http/ngx_http_core_module.html#reuseport). 
+ +{{< /note >}} + +## Extend App Security with Snippets + +When adding [NGINX Controller App Security]({{< relref "add-app-security-with-waf" >}}) to your components, you can use Snippets to add NGINX App Protect directives that aren't represented in the NGINX Controller API. You can also use Snippets to [tune your NGINX App Protect WAF performance]({{< relref "/controller/app-delivery/security/tutorials/tune-waf-for-app" >}}). + +Refer to [Extend App Security with Snippets]({{< relref "extend-app-security-snippets" >}}) for more information and examples. + +{{< versions "3.22" "latest" "adcvers" >}} diff --git a/content/controller/app-delivery/deploy-simple-app.md b/content/controller/app-delivery/deploy-simple-app.md new file mode 100644 index 000000000..e62726cb9 --- /dev/null +++ b/content/controller/app-delivery/deploy-simple-app.md @@ -0,0 +1,64 @@ +--- +description: Overview of the steps required to deploy a simple App. +docs: DOCS-477 +doctypes: +- tutorial +tags: +- docs +title: Deploy a Simple Application +toc: true +weight: 400 +--- + +## Overview + +This topic provides an overview of the steps required to create a simple application by using F5 NGINX Controller's user interface. Use the links provided to learn more about each step. + +## Create an Environment + +First, you'll need to create an Environment. + +- [About Environments]({{< relref "/controller/services/manage-environments.md#about-environments" >}}) +- [Create an Environment]({{< relref "/controller/services/manage-environments.md#create-an-environment" >}}) + +## Create a Certificate + +If you want to secure your application traffic, you'll need to add Certificates. + +If you just want to deploy a simple HTTP application, skip ahead to [Gateways](#create-a-gateway). 
+ +{{< tip >}} Make sure that you add the new Cert to the Environment that you created in the previous step.{{< /tip >}} + +- [About Certificates]({{< relref "/controller/services/manage-certs.md#about-certificates" >}}) +- [Create a certificate]({{< relref "/controller/services/manage-certs.md#create-a-cert" >}}) + +## Create a Gateway + +Next, you'll need to create a Gateway. Be sure to add the Gateway to your Environment. + +- [About Gateways]({{< relref "/controller/services/manage-gateways.md#about-gateways" >}}) +- [Create a Gateway]({{< relref "/controller/services/manage-gateways.md#create-a-gateway" >}}) + +## Create an Identity Provider + +If you require authentication for any Component, you need to define an Identity Provider. The provider should be in the same environment as your component. +to be in the same environment as your components. + +- [Identity Provider]({{< relref "/controller/services/manage-identity-providers.md" >}}) + +## Create an App + +Create an App. The App needs to be in your Environment and needs to connect to your Gateway. If you created a Cert by following the instructions above and added the Cert to the Gateway, the App will access the Cert via the Gateway. If you didn't add the Cert to the Gateway, you can reference the Cert in the App's definition by choosing the Cert from the Certs list. + +- [About Apps]({{< relref "/controller/app-delivery/about-app-delivery.md#apps" >}}) +- [Create an App]({{< relref "/controller/app-delivery/manage-apps.md#create-an-app" >}}) + +## Create Components for your App + +Finally, create Components for your App. Components let you partition an App into smaller, self-contained pieces that are each responsible for a particular function of the overall application. For example, a Component could correspond to a microservice that, together with several other microservices, comprises a complete application. 
+ +- [About Components]({{< relref "/controller/app-delivery/about-app-delivery.md#components" >}}) +- [Create a Component]({{< relref "/controller/app-delivery/manage-apps.md#create-a-component" >}}) + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/app-delivery/manage-apps.md b/content/controller/app-delivery/manage-apps.md new file mode 100644 index 000000000..ab4e10613 --- /dev/null +++ b/content/controller/app-delivery/manage-apps.md @@ -0,0 +1,375 @@ +--- +description: Create, view, and edit Apps and Components. +docs: DOCS-478 +doctypes: +- task +tags: +- docs +title: Manage Apps & Components +toc: true +weight: 300 +--- + +## Overview + +Follow the steps in this topic to learn how to create and manage Apps and App Components. + +{{< tip >}}You can also use the F5 NGINX Controller API to create Apps and Components. See the [NGINX Controller API Reference]({{< relref "/controller/api/_index.md" >}}) for details.{{< /tip >}} +  + +## Before You Begin + +You will need to select an [Environment]({{< relref "/controller/services/manage-environments.md#create-an-environment" >}}) and [Gateway]({{< relref "/controller/services/manage-gateways.md#create-a-gateway" >}}) -- or create new Environment and Gateway resources -- when adding a new App. + +{{< note >}}If you do not have permission to create these resources and none are available to select, contact your system administrator.{{< /note >}} +  + +## Create an App + +To create an App: + +1. Open the NGINX Controller user interface and log in. +1. Select the NGINX Controller menu icon, then select **Services**. +1. On the **Services** menu, select **Apps**. +1. On the **Apps** menu, select **Create App**. +1. On the **Create App** page, provide the following information: + - Name + - Environment + - Description (Optional) + - Display Name (Optional) + - Tags (Optional) +1. Select **Submit**. 
+ +## Create a Component + +To create a Component: + +1. Open the NGINX Controller user interface and log in. +1. Select the NGINX Controller menu icon, then select **Services**. +1. On the **Services** menu, select **Apps**. +1. On the **Apps** menu, in the **Recent Apps** section, select the name of the App that you want to add the Component to. +1. On the Overview page for your App, select **Create Component**. +1. Then, complete each of the configuration sections as needed: + + - [General Configuration](#general-configuration) + - [URIs](#uris) + - [Workload Groups](#workload-groups) + - [Ingress](#ingress) + - [Backend](#backend) + - [Monitoring](#monitoring) + - [Errors and Logs](#errors-and-logs) + - [Programmability](#programmability) + - [Caching](#caching) + - [Snippets](#snippets) + - [Rate Limiting](#rate-limiting) + - [Authentication](#authentication) + - [Security](#security) + +1. When ready, review the API Spec and then select **Submit** to create the Component. + +## Configuration Options + +### General Configuration + +On the **Create App Component** *Configuration* page: + +1. Select the App Component Type: + + - Web + - TCP/UDP + +1. Provide the name for your Component. +1. (Optional) Provide a Display Name. +1. (Optional) Provide a Description. +1. (Optional) Add any desired tags. +1. (Optional) Select a **Gateway Ref** or select **Create Gateway Ref** to create a new Gateway. +1. Select **Next**. + +### URIs + +A Component definition must contain one or more URIs. + +**Web Component URIs** can be either of the following: + +- a complete URI that follows the format `[:port][/path]`, or +- a relative path that follows the format `[/...]`. + +Relative paths inherit the host URI configured for the Gateway associated with the Component. +The host and relative path(s) defined for a Component take precedence over the host defined in the associated Gateway. 
+ +Example Web URI definitions: + +- `http://www.f5.com:8080/sales` +- `http://*.f5.com:5050/test` +- `/images` +- `/*.jpg` +- `/locations/us/wa*` + +**TCP/UDP URIs** must be a complete URI that follows the format ``. +TCP+TLS URIs can include TLS information. + +Example TCP/UDP URI definitions: + +- `tcp://192.168.1.1:12345` +- `tcp+tls://192.168.1.1:12346` +- `tcp://192.168.1.1:12345-12350` +- `tcp://*:12345` +- `udp://192.168.1.1:12345` +- `udp://*:12345` + +On the **Create App Component** *URIs* page: + +1. Define the **URIs**: + + - Select **Add URI**. + - In the **URI** box, type the URI for the Component. + - (Optional) Select a **Match Method** (applicable only to Web Components). + - (Optional) Select **Customize for this URI** to add custom **TLS Settings**. + + {{< note >}} +TLS Settings can be inherited from the Gateway, or customized at the Component level. Enable this option if you want the Component to use a different cert than that used by the Gateway. + {{< /note >}} + +1. (Optional) Define the **Shared TLS Settings**. + + - To use a cert that is already associated with the Gateway, select it from the list. + - To add a new shared cert, select **Create New**. + +1. Select **Next**. + +### Workload Groups + +On the **Create App Component** *Workload Groups* page: + +1. Provide a Workload Group Name. +1. (Optional) Select a Location. + + The location determines which instances or instance groups the workload group is applied to. If any workload group specifies a location, they all must specify a location. Note: If the associated gateway uses instance groups, the location should refer to the instance group location, not the location(s) of the individual instances that make up that group. + + {{< see-also >}}Refer to the [Manage Locations]({{< relref "/controller/infrastructure/locations/manage-locations.md" >}}) topic for more information.{{< /see-also >}} +1. Define the backend workload URIs. +1. (Optional) Define the DNS Server. +1. 
(Optional) Select the Load Balancing Method. The default value is "Round Robin". + + {{< see-also >}}Refer to the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/) for more information about the available options.{{< /see-also >}} + +1. (Optional) Select the Session Persistence Type (applicable only to Web Components). +1. (Optional) Select the Desired Proxy Settings (applicable only to Web Components). + + {{< tip >}}Hover your pointer over the info icon for each setting to learn about the expected values and requirements.{{< /tip >}} +1. Select **Next**. + +### Ingress + +On the **Create App Component** *Ingress* page: + +{{< note >}} The following settings are applicable only to Web components. {{< /note >}} + +1. (Optional) Select the supported HTTP methods. +1. (Optional) Set the desired **Client Max Body Size**. + + {{< see-also >}} +Refer to the [`ngx_http_core_module` docs](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size) for more information about these options. + {{< /see-also >}} + +1. Select **Next**. + +### Backend + +On the **Create App Component** *Backend* page: + +{{< note >}} The following settings are applicable only to Web components. {{< /note >}} + +1. (Optional) Enable [NTLM authentication](https://en.wikipedia.org/wiki/Integrated_Windows_Authentication) to allow proxying requests with NT LAN Manager (NTLM) Authentication. +1. (Optional) Specify the persistent state. +1. (Optional) Set the HTTP protocol version for proxying. +1. (Optional) Specify the Keep Alive settings: + + - **Connections**: Set the maximum number of idle keepalive connections to upstream servers that are preserved in the cache of each worker process. When this number is exceeded, the least recently used connections are closed. + - **Requests per Connection**: Set the maximum number of requests that can be served through one keepalive connection. 
After the maximum number of requests is made, the connection is closed. + - **Idle Timeout box**: Set a timeout during which an idle keepalive connection to an upstream server will stay open. +1. Select **Next**. + +### Monitoring + +On the **Create App Component** *Monitoring* page: + +1. (Optional) Enable **Health Monitoring** and define the desired Monitoring Request and Response. Health Monitoring is disabled by default. +1. (Optional) Enable **Workload Health Events**. Workload Health Events are disabled by default. +1. (Optional) Specify the URI to use in health check requests (applicable only to Web Components). The default is `/`. For TCP/UDP Components, specify the Send string. +1. (Optional) Specify the port to use when connecting to a server to perform a health check. The server port is used by default. +1. (Optional) Set the interval to wait between two consecutive health checks. The default is 5 seconds. +1. (Optional) Specify the number of consecutive passed health checks that must occur for a server to be considered healthy. The default is 1. +1. (Optional) Specify the number of consecutive failed health checks that must occur for a server to be considered unhealthy. The default is 1. +1. (Optional) Specify the default state for the server. The default state is `HEALTHY`. +1. (Optional) Specify the starting HTTP status code to match against (applicable only to Web components). +1. (Optional) Specify the ending HTTP status code to match against (applicable only to Web components). +1. (Optional) Select whether a response should pass in order for the health check to pass (applicable only to Web components). By default, the response should have status code `2xx` or `3xx`. +1. Select **Next**. + + {{< see-also>}} +Refer to the [`ngx_http_upstream_hc_module` docs](http://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check) for more information about these options. 
+ {{< /see-also >}} + +### Errors and Logs + +On the **Create App Component** *Logs* page: + +1. (Optional) Select the logs to enable: + + - Error Log + - Access Log + +1. (Optional) Specify the log format to use. +1. Select **Next**. + + {{< see-also >}} +Refer to the [`ngx_http_log_module` docs](http://nginx.org/en/docs/http/ngx_http_log_module.html) for more information about these options. + {{< /see-also >}} + +### Programmability + +On the **Create App Component** *Programmability* page: + +{{< note >}} The following settings are applicable only to Web components. {{< /note >}} + +1. (Optional) Select **Add URI Redirects** and define the desired redirect condition(s). +1. (Optional) Select **Add URI Rewrite** and define the desired rewrite pattern(s). +1. (Optional) Select **Add Request Header Modification** and define how to modify the request header. +1. (Optional) Select **Add Response Header Modification** and define how to modify the response header. +1. Select **Next**. + + {{< see-also >}} +Refer to the [`ngx_http_rewrite_module` docs](http://nginx.org/en/docs/http/ngx_http_rewrite_module.html) for more information about these options. + {{< /see-also >}} + +### Caching + +{{< note >}} +Introduced in NGINX Controller App Delivery module v3.22. +{{< /note >}} + +On the **Create App Component** *Caching* page: + +1. Select the *Enable Caching* toggle to turn on caching. +1. Define the *Split Config* settings as appropriate for your component. + + - **PERCENTAGE** -- Select if you want to split the cache across two or more disk stores and assign a percentage of the store to each location. The *key* field is not required for this option if users set only one disk. + - **STRING** -- Select if you want to split the cache across two or more disk stores using pattern matching. The *key* field is required for this option. + + {{< note >}}The *key* string must contain at least one valid [NGINX variable](https://nginx.org/en/docs/varindex.html). 
Example: `${request_uri}`{{< /note >}} + +1. Define the desired settings for the Disk Store: + + - **Path**: This is the location where the cache will be stored; this path must already exist on the data plane. + - **Max Size** + - **Min Free** + - **In Memory Store Size** + - **Is Default** + - **Temp Path** (Optional) + - **Inactive Time** (Optional) + - **Directory Level** (Optional) + - **Trim Policy** (Optional) + - **Loader Policy** (Optional) + - **Purger Policy** (Optional) + + {{< see-also >}}Refer to the [`proxy_cache_path` docs](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) for more information about these options.{{< /see-also >}} + +1. Select *Add Disk Store* to add another disk store (Optional). + This will split the cache across multiple storage locations according to the *Split Config* criteria you selected. + + The following *Split Config* options will display depending on the criteria you selected: + - **Percent Criteria** - Required when using "PERCENTAGE" criteria type; must be an integer followed by the `%` symbol; decimals are supported; for example, `75%` or `50.5%`. + - **String Criteria** - Required when using "STRING" criteria type; Depending upon the `SplitConfig`-> `Key` it could be a string like `~/html`, `~*.html$'` or IP based string like `10.1.1.2` + +1. Select **Next** to go to the next page, or **Submit** to save and submit your changes. + +### Snippets + +{{< note >}} +Introduced in NGINX Controller App Delivery module v3.22. +{{< /note >}} + +Refer to the [About Snippets]({{< relref "/controller/app-delivery/about-snippets.md" >}}) topic to learn more about Snippets and how they impact the NGINX Controller-generated `nginx.conf` file. + +On the **Create App Component** *Snippets* page: + +1. Select the appropriate snippet type: + + - *Add URI Snippet*: Adds NGINX directives to the component's `server` and `location` blocks. 
+ - *Add Workload Group Snippet*: Adds NGINX directives to the component's `upstream` block(s). + +1. Paste or type the desired snippet into the text field. + + Snippets should follow the standard `nginx.conf` format. + For example, the below URI snippet adds the `proxy_set_header` directive to the component's `server` block. + + ```Nginx configuration file + proxy_set_header Host $proxy_host; + ``` + + {{< caution >}}When you use Snippets to customize your NGINX configuration, your changes are applied to the `nginx.conf` file *as is*. NGINX Controller does not verify that your configuration is valid before applying the snippet. We strongly recommend verifying Snippets in a lab environment before making any changes in production.{{< /caution >}} + +1. Select **Next** to preview the REST API call for your component, or **Submit** to save and submit your changes. + +### Rate Limiting + +On the **Create App Component** *Rate Limiting* page: + +{{< note >}} The following Rate Limiting settings are applicable only to Web components. {{< /note >}} + +1. Enable Rate Limiting and select a **Key**. +1. Select options for Rate and Units. +1. (Optional) Select options for Excess Request Processing and Ignore Initial N Requests. +1. Select options for Reject Status Code. +1. Select **Next**. + +### Authentication + +On the **Create App Component** *Authentication* page: + +1. Select **Add Authentication**. +1. Select an [**Identity Provider**]({{< relref "/controller/services/manage-identity-providers.md" >}}). +1. Select a **Credential Location**. +1. (Optional) Enable [**Conditional Access**]({{< relref "/controller/services/available-policies.md#conditional-access" >}}). +1. Select **Next**. + +### Security + +On the **Create App Component** *Security* page: + +{{< note >}} The following Security settings are applicable only to Web components. {{< /note >}} + +1. 
(Optional) Select **Enable Web Application Firewall (WAF)** to watch for or block suspicious requests or attacks. +1. (Optional) Select **Monitor Only** to allow traffic to pass without being rejected. Security events are still generated and metrics are still collected. Refer to [About App Security Analytics]({{< relref "/controller/analytics/view-app-security-analytics.md#overview" >}}) for more information. +1. (Optional) the signature(s) that you want the WAF to ignore. You can specify multiple signatures as a comma-separated list. +1. Select **Next**. + +{{< see-also >}} +Refer to the [Secure Your Apps]({{< relref "/controller/app-delivery/security/_index.md" >}}) topics to learn more about WAF and the default protections provided by NGINX App Protect. +{{< /see-also >}} + +## Edit or Delete Apps and Components + +To view, edit, and delete Apps: + +1. Open the NGINX Controller user interface and log in. +1. Select the **NGINX Controller menu icon** > **Services** > **Apps**. +1. On the **Apps** menu, select **Overview**. The **Apps Overview** page is displayed and shows a list of your Apps. +1. To view the details for an App, including metrics data and components, select the App name in the list of Apps. +1. To edit the App, select **Edit Config** on the **Quick Actions** menu. +1. To delete the App, select **Delete Config** on the **Quick Action**s menu. + +To edit or delete a Component: + +1. Open the NGINX Controller user interface and log in. +1. Select the **NGINX Controller menu icon** > **Services** > **Apps**. +1. On the **Apps** menu, select **Overview**. The **Apps Overview** page is displayed and shows a list of your Apps. +1. Select the App that contains the Component that you want to modify. The App's **Overview** page is displayed. +1. In the details panel for your App, select **Components**. +1. On the **Components** page, select the Component that you want to modify. +1. 
To edit the Component, select **Edit Config** on the **Quick Actions** menu. +1. To delete the Component, select **Delete Config** on the **Quick Actions** menu. + +{{< versions "3.0" "latest" "ctrlvers" >}} +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/app-delivery/security/_index.md b/content/controller/app-delivery/security/_index.md new file mode 100644 index 000000000..e1d529167 --- /dev/null +++ b/content/controller/app-delivery/security/_index.md @@ -0,0 +1,8 @@ +--- +aliases: +- services/apps/security/_index.md +description: Secure your applications using F5 NGINX Controller App Security +title: App Security +weight: 100 +url: /nginx-controller/app-delivery/security/ +--- diff --git a/content/controller/app-delivery/security/concepts/_index.md b/content/controller/app-delivery/security/concepts/_index.md new file mode 100644 index 000000000..1fd100ddc --- /dev/null +++ b/content/controller/app-delivery/security/concepts/_index.md @@ -0,0 +1,8 @@ +--- +aliases: +- services/apps/security/concepts/_index.md +description: Overview of the App Security module and default policy +title: Learn About App Security +weight: 100 +url: /nginx-controller/app-delivery/security/concepts/ +--- diff --git a/content/controller/app-delivery/security/concepts/app-sec-default-policy-original.md b/content/controller/app-delivery/security/concepts/app-sec-default-policy-original.md new file mode 100644 index 000000000..a422b0e9c --- /dev/null +++ b/content/controller/app-delivery/security/concepts/app-sec-default-policy-original.md @@ -0,0 +1,110 @@ +--- +description: Learn about the default protections provided by F5 NGINX Controller App + Security. +docs: DOCS-479 +doctypes: +- concept +- reference +tags: +- docs +title: Default WAF Policy +toc: true +weight: 200 +--- + +## Overview + +You can use the F5 NGINX Controller App Security module to configure and manage a web application firewall (WAF). 
The App Security WAF protects your applications from HTTP and web-based threats, including the [OWASP Top 10](https://owasp.org/www-project-top-ten/). + +NGINX Controller App Security provides out-of-the-box analytics events and metrics, which are reported through the NGINX Controller API and user interface. App Security works with [NGINX App Protect](https://www.nginx.com/products/nginx-app-protect), running NGINX Plus as the WAF in the data path. + +## Default Policy + +The default policy for App Security WAF in NGINX Controller focuses on [OWASP Top 10](https://owasp.org/www-project-top-ten/) protection. This policy is the same default policy that is used by NGINX App Protect. + +The default policy for NGINX Controller App Security WAF includes these security checks: + + + + + +| **Security Checks** | **Description** | +|---------------------------|-----------------| +| HTTP RFC compliance enforcement | Validation of HTTP requests to prevent the use of the HTTP protocol as an entry point for malicious requests to applications. | +| URL normalization | Decoding of requests for encoded request that contain different types of encoded escapes | +| Evasion techniques | Protection for techniques commonly used by hackers to access resources or evade what would otherwise be identified as an attack. The checks performed are:
  • Bad unescape (bad escaping)
  • Directory traversal
  • Bare byte decoding
  • Apache whitespace
  • Multiple % decoding
  • IIS Unicode codepoint
  • IIS backslashes
  • %u decoding
| +| Malformed cookie | Validates that the cookie format is RFC compliant. | +| Illegal status code | Responses in the 400–500 range -- except for `400`, `401`, `404`, `407`, `417`, `503` -- are rejected. | +| Request size exceeds the buffer | Requests that exceed the buffer size | +| Maximum length for URL, header, query string, cookie, and POST data | URL length: 2048
Header length: 4096
Query string length: 2048
Cookie length: 4096
Post data length: 4096

{{< note >}} The whole request length is not checked. The entire request cannot exceed the maximum buffer size of 10 MB.{{< /note >}} | +| Disallowed file type extension | These file types are disallowed:
  • bak, bat, bck, bkp, cfg, conf, config, ini, log, old, sav, save, temp, tmp
  • bin, cgi, cmd, com, dll, exe, msi, sys, shtm, shtml, stm
  • cer, crt, der, key, p12, p7b, p7c, pem, pfx
  • dat, eml, hta, htr, htw, ida, idc, idq, nws, pol, printer, reg, wmz
| +| Allowed methods | Only these HTTP methods are allowed:
  • GET
  • HEAD
  • POST
  • PUT
  • PATCH
  • DELETE
  • OPTIONS
| +| Character/Metacharacter validation in URL and header | Metacharacters are checked in the URL and header. | +| Parameter parsing | NGINX Controller App Security auto-detects the payload type for JSON and XML. App Security then applies the signature that matches the correct format.| +| JSON format | If the content is JSON, then App Security checks that the JSON payload body is well-formed. The max structure depth and max array length may not exceed 25. The max structure depth and max array length may not exceed 25.

No JSON schema enforcement. | +| DTD XML format | If the content is XML, then App Security checks that an XML payload body is well-formed.

No XML schema enforcement. No SOAP and Web Services Security format enforcement. | + +## Attack Types Used In Default Policy + +The following signature attack types are included with the default NGINX Controller App Security WAF policy. These attack types protect against [OWASP Top 10](https://owasp.org/www-project-top-ten/) vulnerabilities and [CVEs](https://cve.mitre.org/). Low, medium, and high accuracy signatures generate events as part of assessing the [Violation Rating](#use-of-violation-ratings-in-default-policy). + +- Command Execution Signatures +- Cross-Site Scripting Signatures +- Directory Indexing Signatures +- Information Leakage Signatures +- OS Command Injection Signatures +- Path Traversal Signatures +- Predictable Resource Location Signatures +- Remote File Include Signatures +- SQL Injection Signatures +- Authentication/Authorization Attacks Signatures +- XML External Entity (XXE) Signatures +- XPath Injection Signatures +- Buffer Overflow Signatures +- Denial of Service Signatures +- Vulnerability Scanner Signatures + +## Use of Violation Ratings in Default Policy + +The default policy for App Security assesses violations and provides a Violation Rating. This rating is an NGINX App Protect computed assessment of the risk of the request and its likelihood of an attack based on the triggered violations. + +NGINX App Protect violations are rated to distinguish between attacks and potential false-positive alerts. A rating is assigned to requests based on the presence of one or more violations. Each violation type and severity contribute to the calculation of the Violation Rating assigned to a request. + +The possible Violation Ratings are: + +- 0: No violation (no event available) +- 1: Possible False Positive (no event available) +- 2: Most Likely False positive (no event available) +- 3: Needs examination +- 4: Possible Attack +- 5: Most Likely Attack + +The Violation Rating is a dimension in Security Violation Events. 
NGINX App Protect rejects requests that have a Violation Rating of `4 (Possible Attack)` or `5 (Most Likely an Attack)`. However, the following violations and signature sets have a low chance of being false positives and are, therefore, configured by default to reject the request regardless of its Violation Rating: + +- High accuracy attack signatures +- Threat campaigns +- Malformed request: unparsable header, malformed cookie, and malformed body (JSON or XML). + +{{< note >}} + +With the default policy, all requests rejected by NGINX App Protect generate a Security Event in NGINX Controller. Requests with Violation Rating of `3 (Needs examination)` also generate a Security Event in NGINX Controller. All other requests do not generate a Security Event in NGINX Controller. + +{{< /note >}} + +## Additional Information + +### HTTP RFC Compliance Already Rejected By NGINX + +Note the following events are blocked by NGINX Plus and not by the NGINX Controller App Security policy. These events are not reported in NGINX Controller as security violation events. + +| **HTTP RFC Compliance Checks** | **Description** | +|--------------------------------|-----------------| +| Unparsable request content | This violation is triggered when the system's parser cannot parse the message. | +| Several Content-Length headers | More than one content-length header is a non-RFC violation. Indicates an HTTP response splitting attack. | +| NULL in header | The system issues a violation for requests with a NULL character in the header. | +| No Host header in HTTP/1.1 request | Check to see if HTTP/1/1 requests contain a "Host" header. | +| High ASCII characters in headers| Check for high ASCII characters (greater than 127) in headers. | +| Content length should be a positive number | The Content-Length header value should be greater than zero; only a numeric positive number value is accepted. | +| Bad HTTP version | Enforces legal HTTP version number (only 0.9 or higher allowed). 
| + +{{< versions "3.12" "latest" "ctrlvers" >}} diff --git a/content/controller/app-delivery/security/concepts/app-sec-metrics.md b/content/controller/app-delivery/security/concepts/app-sec-metrics.md new file mode 100644 index 000000000..6a0da647d --- /dev/null +++ b/content/controller/app-delivery/security/concepts/app-sec-metrics.md @@ -0,0 +1,185 @@ +--- +description: Learn about the F5 NGINX Controller Application Security metrics and events. +docs: DOCS-480 +doctypes: +- reference +tags: +- docs +title: App Security Metrics +toc: true +weight: 400 +--- + +## Overview + +This topic provides reference information for the metrics and events that F5 NGINX Controller reports for Application Security. + +## Security Metrics and Event Dimensions + +The following table shows the attributes and dimensions you can view and filter by for WAF violation events. + +{{}} + +| **Attribute** | **Possible Values** | **Description and Additional Information** | +|-------------|-----------|------| +| category | security violation | | +| timestamp | Timestamp of the request | UTC | +| message | | Provides summary info about if a request was rejected or flagged, from what source, and due to what attack types.| +| level | `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` | Security violation events are only `INFO` for now| +| hostname | |Hostname used in the request| +| environment | | | +| app | | | +| component | | | +| gateway | | | +| corelationId | | | +| http.request_endpoint | | Request URI | +| http.request_method | | Method used for the request| +| request_outcome |`REJECTED`, `PASSED`| The outcome of the request after Controller App Security processed the request.| +| request_outcome_reason | `SECURITY_WAF_OK`, `SECURITY_WAF_VIOLATION`, `SECURITY_WAF_FLAGGED`, `SECURITY_WAF_BYPASS`, `SECURITY_NGINX_VIOLATION`, `SECURITY_WAF_FLAGGED` | request_outcome_reason provides the reason why App Security rejected or flagged a request to be reviewed. 
Outcome reasons for `SECURITY_WAF_BYPASS` and `SECURITY_NGINX_VIOLATION` have not been implemented.
{{< note >}} App Security Events are not created for requests that don't trigger any violations. This means you should not see Events with `outcome_reason = SECURITY_WAF_OK`.{{< /note >}}| +| http.response_code | | Response code returned to App Security. A `0` code is returned if App Security did not block the request.| +| http.hostname | | Hostname of request| +| http.remote_addr | | Client IP of the request| +| http.remote_port | | Port of the client initiating the request| +| http.server_addr | | Server IP address that NGINX is listening on| +| http.server_port | | Server IP port that NGINX is listening on| +| waf.http_request | | Request including header, body, etc.| +| waf.support_id | | ID seen on the App Security rejection page| +| waf.signature_ids | | ID list of signatures triggered with the request. It usually does not go above three signature IDs.| +| waf.signature_names | | Names of signatures triggered with the request. It usually does not go above three signature names.| +|waf.attack_types | | Attack types triggered by the request. It can be based on any of the above signature or other protection mechanisms used in the WAF policy. It usually does not go above three attack types.| +| violations ||Comma-separated list of logical violation names| +| sub_violation ||More specific violations within ‘HTTP protocol compliance failed’ (violation = `VIOL_HTTP_PROTOCOL`) and/or ‘Evasion technique detected’ violations (violation = `VIOL_EVASION`) | +| sig_cves||Signature CVEs value of the matched signatures.| +| is_truncated||A flag that returns true if a request is truncated in the security events, or false if it is not. | +| x_forwarded_for_header_value||X-Forwarded-For header information. This option is commonly used when proxies are involved to track the originator of the request.| + +{{< /bootstrap-table >}} + +### Attack Types and Description + +Each signature and violation has an Attack Type which is the attack vector WAF protects from. 
The Attack Types and their descriptions are listed here.
An attacker could provide special URLs to read or update internal resources such as localhost services, cloud metadata servers, internal network web applications or HTTP enabled databases.| +|Cache Poisoning| Cache poisoning is an attack against the integrity of an intermediate Web cache repository, in which genuine content cached for an arbitrary URL is replaced with spoofed content.| +|WebSocket Parser Attack | WebSocket parser attack targets the functionality of the WebSocket parser to crash it or force the parser to work abnormally.| +|GWT Parser Attack | This attack targets the functionality of the GWT parser to crash it or force the parser to work abnormally.| +|Cross-site Request Forgery | An attacker exploits the web application’s assumption and trust that the authenticated user is purposely sending requests to perform actions or commands, while the attacker is causing the user to send the commands without the user’s knowledge or consent.| +|JSON Parser Attack |This attack targets the functionality of the JSON parser to crash it or force the parser to work abnormally.| +|Malicious File Upload|Malicious file upload occurs when a user tries to upload a malicious file to the web application. This could allow remote attackers to cause Server Infection, Network Infection, Buffer Overflow, and Remote Comma Execution.| +|HTTP Response Splitting|Specially crafted HTTP messages can manipulate the webserver or cache’s standard behavior. 
This can lead to XSS, and cache poisoning.| +|Session Hijacking|An attacker can steal a valid web session from legitimate users to gain unauthorized access.| +|XML Parser Attack|This attack targets the functionality of the XML parser to crash it or force the parser to work abnormally.| +|Parameter Tampering|By changing certain parameters in a URL or web page form, attackers can successfully attack the web application business logic.| +|Injection Attempt|This is an attack where an attacker injects OS commands, active script commands (in JavaScript or any other scripting language), or SQL commands into various parts of an HTTP request, for the injected content to run on remote systems. The two most common injection attacks are SQL injection and Cross-Site Scripting.| +|Brute Force Attack|Brute-force attacks are mainly used for guessing passwords and bypassing access control of an application by executing many different attempts.| +|Forceful Browsing|This attack occurs when an attacker is directly accessing a URL, which could grant access to a restricted part of the web site.| +|HTTP Request Smuggling Attack|Specially crafted HTTP messages can manipulate the webserver or cache’s standard behavior. 
This can lead to XSS, and cache poisoning.| +|HTTP Parser Attack|HTTP parser attack targets the functionality of the HTTP parser to crash it or force the parser to work abnormally.| +|Other Application Activity|This attack does not belong to any specific attack category, however, it is a violation of the user-defined security policy.| +|Denial of Service|A denial-of-service (DoS) attack represents a family of attacks aimed to exhaust the application server resources up to a point that the application cannot respond to legitimate traffic, either because it has crashed, or because its slow response renders it effectively unavailable.| +|Cross-Site Scripting (XSS)|Cross-Site Scripting (XSS) occurs when a web application does not sanitize user-supplied input and places it directly into the page returned to the user. Usually, the attacker will submit malicious JavaScript, VBScript, ActiveX, HTML, or Flash code to the vulnerable website.| +|SQL-Injection|SQL-Injection occurs when a web application does not sanitize user-supplied input and places it directly into the SQL statement. This attack allows remote attackers to run SQL statements on the internal database.| +|Command Execution|Web applications can be tricked to execute operating system commands, injected from a remote machine if user-supplied input is not properly checked by the web application.| +|Server Side Code Injection|An attacker can submit server-side code by invalidated input. The webserver, when parsing malicious input, may execute operating system commands or access restricted files.| +|LDAP Injection|If user-supplied input is not correctly sanitized, the attacker could change the construction of LDAP statements. Successful exploitation results in information gathering, system integrity compromise, and possible modification of the LDAP tree.| +|XPath Injection|XPath-Injection occurs when a web application does not sanitize user-supplied input but places it directly into the XML document query. 
Successful exploitation results in information gathering and system integrity compromise.| +|Path Traversal|Path traversal can be used to bypass the webserver root and request various files, including system files or private directories and resources. This attack can lead to information disclosure and possible exposure of sensitive system information.| +|Directory Indexing|This is a directory listing attempt which can lead to information disclosure and possible exposure of sensitive system information. Directory Indexing attacks usually target webservers that are not correctly configured, or which have a vulnerable component that allows Directory Indexing.| +|Information Leakage|Sensitive information may be present within HTML comments, error messages, source code, or simply left in files that are accessible by remote clients. Besides, attackers can manipulate the application to reveal classified information like credit card numbers. This can lead to the disclosure of sensitive system information which may be used by an attacker to further compromise the system.| +|Predictable Resource Location|By making educated guesses, the attacker could discover hidden web site content and functionality, such as configuration, temporary, backup, or sample files. This can lead to the disclosure of sensitive system information which may be used by an attacker to compromise the system.| +|Buffer Overflow|Buffer Overflow could be triggered when data written to memory exceeds the allocated size of the buffer for that data. 
This could lead to denial of service or arbitrary code execution.|
Blocks modified requests.| +|`VIOL_ATTACK_SIGNATURE`|Attack signature detected|The system examines the HTTP message for known attacks by matching it against known attack patterns. See signature_ids and signature_names attributes for specific signatures matched.| +|`VIOL_COOKIE_EXPIRED`|Expired timestamp|The system checks that the timestamp in the HTTP cookie is not old. An old timestamp indicates that a client session has expired. Blocks expired requests. The timestamp is extracted and validated against the current time. If the timestamp is expired and it is not an entry point, the system issues the Expired Timestamp violation.| +|`VIOL_COOKIE_LENGTH`|Illegal cookie length|The system checks that the request does not include a cookie header that exceeds the acceptable length specified in the security policy.| +|`VIOL_COOKIE_MALFORMED`|Cookie not RFC-compliant|This violation occurs when HTTP cookies contain at least one of the following components:
  • Quotation marks in the cookie name.
  • A space in the cookie name.
  • An equal sign (=) in the cookie name. Note: A space between the cookie name and the equal sign (=), and between the equal sign (=) and cookie value is allowed.
  • An equal sign (=) before the cookie name.
  • A carriage return (hexadecimal value of 0xd) in the cookie name.
  • | +|`VIOL_ENCODING`|Failed to convert character|The system detects that one of the characters does not comply with the configured language encoding of the web application’s security policy.| +|`VIOL_EVASION`|Evasion technique detected|This category contains a list of evasion techniques that attackers use to bypass detection.| +|`VIOL_FILETYPE`|Illegal file type|The system checks that the requested file type is configured as a valid file type, or not configured as an invalid file type, within the security policy. Only for disallowed file types.| +|`VIOL_HEADER_LENGTH`|Illegal header length|The system checks that the request includes a total HTTP header length that does not exceed the length specified in the security policy. The actual size in the default policy is 4 KB.| +|`VIOL_HEADER_METACHAR`|Illegal meta character in header|The system checks that the values of all headers within the request only contain meta characters defined as allowed in the security policy.| +|`VIOL_HTTP_PROTOCOL`|HTTP protocol compliance failed|This category contains a list of validation checks that the system performs on HTTP requests to ensure that the requests are formatted properly.| +|`VIOL_HTTP_RESPONSE_STATUS`|Illegal HTTP response status|The server response contains an HTTP status code that is not defined as valid in the security policy.| +|`VIOL_JSON_MALFORMED`|Malformed JSON data|The system checks that the request contains JSON content that is well-formed. Enforces parsable JSON requests.| +|`VIOL_METHOD`|Illegal method|The system checks that the request references an HTTP request method that is found in the security policy. Enforces desired HTTP methods; GET and POST are always allowed. These HTTP methods are supported: GET, HEAD, POST, PUT, PATCH, DELETE, OPTIONS.| +|`VIOL_QUERY_STRING_LENGTH`|Illegal query string length|The system checks that the request contains a query string whose length does not exceed the acceptable length specified in the security policy. 
In * file type entity. The actual size is 2 KB.| +|`VIOL_REQUEST_MAX_LENGTH`|Request length exceeds defined buffer size|The system checks that the request length is not larger than the maximum memory buffer size of the ASM. Note that this is a BIG-IP unit parameter that protects the ASM from consuming too much memory across all security policies which are active on the device. Default is 10MB.| +|`VIOL_URL_LENGTH`|Illegal URL length|The system checks that the request is for a URL whose length does not exceed the acceptable length specified in the security policy. In * file type entity. The actual size is 2 KB.| +|`VIOL_URL_METACHAR`|Illegal meta character in URL|The system checks that the incoming request includes a URL that contains only meta characters defined as allowed in the security policy. Enforces the desired set of acceptable characters.| +|`VIOL_XML_FORMAT`|XML data does not comply with format settings|The system checks that the request contains XML data that complies with the various document limits within the defense configuration in the security policy’s XML profile. Enforces proper XML requests and the data failed format/defense settings such as the maximum document length. This violation is generated when a problem in an XML document is detected (for example, an XML bomb), generally checking the message according to boundaries such as the message’s size, maximum depth, and the maximum number of children.| +|`VIOL_XML_MALFORMED`|Malformed XML data|The system checks that the request contains XML data that is well-formed, according to W3C standards. 
Enforces proper XML requests.| +|`VIOL_RATING_THREAT`|Request is likely a threat|The combination of violations in this request determined that the request is likely to be a threat.| +|`VIOL_PARAMETER_NAME_METACHAR`|Illegal meta character in parameter name|The system checks that all parameter names within the incoming request only contain meta characters defined as allowed in the security policy.| +|`VIOL_PARAMETER_VALUE_METACHAR`|Illegal meta character in value|The system checks that all parameter values, XML element/attribute values, or JSON values within the request only contain meta characters defined as allowed in the security policy. Enforces proper input values.| + +{{< /bootstrap-table >}} + +### HTTP RFC Sub-violations and Descriptions + +The following table specifies the HTTP Compliance sub-violation settings. All are supported in NGINX, but not all are enabled in the default App Protect security template. The table specifies which. Some of the checks are enforced by NGINX Plus and App Protect only gets a notification. Note: In this case, the request is always blocked regardless of the App Protect policy. + +{{}} + +| **Sub-violation** | **Description** | +|-----------------|-----------------| +|Null in request (null in body, null in the header is done by NGINX Plus)|The system issues a violation for requests with a NULL character anywhere in the request (except for a NULL in the binary part of a multipart request).| +|Multiple host headers|Examines requests to ensure that they contain only a single “Host” header.| +|The host header contains IP address|The system verifies that the request’s host header value is not an IP address to prevent non-standard requests.| +|CRLF characters before request start|Examines whether there is a CRLF character before the request method. 
If there is, the system issues a violation.| +|Chunked request with Content-Length header|The system checks for a Content-Length header within chunked requests.| +|Check the maximum number of parameters|The system compares the number of parameters in the request to the maximum configured number of parameters. Maximum is set to 500.| +|Check the maximum number of headers|The system compares the request headers to the maximal configured number of headers. Maximum is set to 50.| +|Unescaped space in URL|The system checks that there is no unescaped space within the URL in the request line. Such spaces split URLs introducing ambiguity on picking the actual one.| +|Bad multipart/form-data request parsing|When the content type of a request header contains the substring “Multipart/form-data”, the system checks whether each multipart request chunk contains the strings “Content-Disposition” and “Name”. If they do not, the system issues a violation.| +|Bad multipart parameters parsing|The system checks the following:
    • A boundary follows immediately after request headers.
    • The parameter value matches the format: `name="param_key";\r\n`.
    • A chunked body contains at least one CRLF.
    • A chunked body ends with CRLF.
    • The final boundary was found in the multipart request.
    • There is no payload after the final boundary.
    If any of these checks is false, the system issues a violation.
    • | + +{{< /bootstrap-table >}} + +### Evasion Techniques and Description + +The following table specifies the Evasion Techniques sub-violation values and descriptions. + +{{}} + +| **Sub-violation** | **Description** | +|-----------------|-----------------| +|%u decoding|The system performs Microsoft %u Unicode decoding (%UXXXX where X is a hexadecimal digit). For example, the system turns a%u002fb to a/b. The system performs this action on URI and parameter input to evaluate if the request contains an attack.| +|Apache whitespace|The system detects the following characters in the URI: 9 (0x09), 11 (0x0B), 12 (0x0C), and 13 (0x0D).| +|Bad unescape|The system detects illegal HEX encoding. Reports unescaping errors (such as %RR).| +|Bare byte decoding|The system detects higher ASCII bytes (greater than 127).| +|Directory traversals|The system ensures that directory traversal commands like ../ are not part of the URL. While requests generated by a browser should not contain directory traversal instructions, sometimes requests generated by JavaScript have them.| +|IIS backslashes|The system normalizes backslashes (`\`) to slashes (`/`) for further processing.| +|IIS Unicode codepoints|The system handles the mapping of IIS specific non-ASCII codepoints. Indicates that, when a character is greater than ‘0x00FF’, the system decodes %u according to an ANSI Latin 1 (Windows 1252) code page mapping. For example, the system turns a%u2044b to a/b. The system performs this action on URI and parameter input.| +|Multiple decoding|The system decodes URI and parameter values multiple times according to the number specified before the request is considered an evasion. 
The maximum decoding is 3.| + +{{< /bootstrap-table >}} + +{{< versions "3.12" "latest" "ctrlvers" >}} diff --git a/content/controller/app-delivery/security/concepts/bring-your-own-policy.md b/content/controller/app-delivery/security/concepts/bring-your-own-policy.md new file mode 100644 index 000000000..9345d0b73 --- /dev/null +++ b/content/controller/app-delivery/security/concepts/bring-your-own-policy.md @@ -0,0 +1,61 @@ +--- +description: Learn how to use your own F5 NGINX App Protect WAF policies with NGINX Controller. +docs: DOCS-481 +doctypes: +- concept +tags: +- docs +title: Bring Your Own WAF Policy +toc: true +weight: 300 +--- + +## Overview + +You can use the App Security Add-on for F5 NGINX Controller ADC to bring your own ("BYO") NGINX App Protect policies into NGINX Controller. This lets you use your existing declarative JSON policies from NGINX App Protect to protect your ADC app components. + +A BYO NGINX App Protect policy lets you maintain consistent Security Policies across your F5 WAF and NGINX WAF deployments. For example, say you already use F5 BIG-IP Application Security Manager (ASM) or F5 Advanced WAF and are now adopting NGINX Controller App Security. You can convert your XML Security Policies to an NGINX App Protect policy by using the [NGINX App Protect Policy Converter tool](https://docs.nginx.com/nginx-app-protect/configuration/#policy-converter). + +To export a policy from F5 Advanced WAF or ASM, take the following steps: + +1. Convert your F5 XML security policy to an NGINX App Protect WAF declarative JSON policy using the [NGINX App Protect Policy Converter tool](https://docs.nginx.com/nginx-app-protect/configuration/#policy-converter). + {{}}We recommend using the Converter tool that corresponds with the most recent NGINX App Protect version.{{}} + +2. Use the NGINX App Protect declarative JSON policy as the WAF policy in NGINX Controller for your app component(s). 
+ +  + +With a BYO NGINX App Protect policy, you can also provide customized security by crafting an NGINX App Protect WAF policy that specifies the security controls appropriate for your apps. For more information on how to configure an NGINX App Protect WAF policy, refer to the [NGINX App Protect Configuration Guide](https://docs.nginx.com/nginx-app-protect/configuration/). + +## Security Strategy for BYO NGINX App Protect Policy + +The BYO NGINX App Protect policy uses the concept of a [Security Strategy]({{< relref "/controller/app-delivery/security/concepts/what-is-waf.md#security-policy-and-security-strategy" >}}) + +With the BYO NGINX App Protect policy feature, you can specify the exact NGINX App Protect policy for the Security Strategy. Then, the Security Strategy can be shared across -- and referenced by -- multiple app components. +A Security Strategy can be comprised of various app-security-related Security Policies. NGINX Controller includes a custom NGINX App Protect WAF policy, which can be assigned to a Security Strategy. + +You can also add a BYO NGINX App Protect WAF policy in JSON format to NGINX Controller "as-is" for use in a Security Strategy. + + +An **App Component** contains a reference to a **Security Strategy**, which, in turn, references a Security Policy. This Security Policy contains the **NGINX App Protect WAF policy**. + +Refer to the topic [Enable WAF for a Component Using Your Own NGINX App Protect Policy]({{< relref "/controller/app-delivery/security/tutorials/add-app-security-with-waf.md#enable-waf-for-a-component-using-your-own-nap-policy-beta" >}}) to get started. + +## Limitations + +BYO NAP WAF policy currently has the following limitations: + +- The size of the BYO NGINX App Protect WAF policy that's referenced by app components may affect application performance. 
+- References to external files, such as the following, in the NGINX App Protect WAF JSON declarative policy are not supported: + - User Defined Signatures + - Security controls in external references + - Referenced OpenAPI spec files +- Cookie modification (`VIOL_COOKIE_MODIFIED`) is not supported. +- gRPC protection is not supported. +- Protection with partial security visibility: + - Not all security metrics dimensions are available for the following: + - Bot violations + - CSRF origin validation violations + - User-defined browser violations + +{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/app-delivery/security/concepts/extend-app-security-snippets.md b/content/controller/app-delivery/security/concepts/extend-app-security-snippets.md new file mode 100644 index 000000000..9f5375b47 --- /dev/null +++ b/content/controller/app-delivery/security/concepts/extend-app-security-snippets.md @@ -0,0 +1,242 @@ +--- +description: Learn how to extend your App Security configurations using F5 NGINX Controller + Snippets. +docs: DOCS-338 +doctypes: +- concept +- reference +tags: +- task +title: Extend App Security with Snippets +toc: true +weight: 400 +--- + +## Overview + +F5 NGINX Controller [Snippets]({{< relref "/controller/app-delivery/about-snippets.md" >}}) let you customize your NGINX configuration by adding NGINX directives that aren't represented by the NGINX Controller API. + +Snippets also let you customize App Security for your Components by adding NGINX App Protect directives that aren't present in the NGINX Controller API. You can use Snippets when [tuning your NGINX App Protect WAF performance]({{< relref "/controller/app-delivery/security/tutorials/tune-waf-for-app" >}}) as well. + +{{< caution >}} +When you use Snippets to customize your NGINX configuration, your changes are applied to the `nginx.conf` file *as is*. NGINX Controller does not verify that your configuration is valid before applying the Snippet. 
+ +We strongly recommend verifying Snippets in a lab environment before making any changes in production. +{{< /caution >}} + +## App Security Usage Examples + +{{< caution >}} +The examples provided here are intended for demonstration purposes only. +We strongly recommend verifying Snippets in a lab environment before making any changes in production. +{{< /caution >}} + +### Define a Backup Location for Security Event Logs + +When you [enable WAF on a Component]({{< relref "/controller/app-delivery/security/tutorials/add-app-security-with-waf" >}}), all Security Events are sent to NGINX Controller logs via syslog. The following example uses the `app_protect_security_log` directive in a URI Snippet to define a local backup location for Security Event logs. You can also send Security Events to another syslog server or to `stderr` by inserting an additional URI Snippet with the `app_protect_security_log` directive. + +{{< caution >}} +Using local files as a backup for Security Events may use up disk space and affect your system performance. In production environments, setting up a remote file or a remote syslog server for backup purposes are good alternatives to using a local backup. 
+{{< /caution >}} + +```json +{ + "metadata": { + "name": "" + }, + "desiredState": { + "ingress": { + "uris": { + "/": { + } + }, + "gatewayRefs": [ + { + "ref": "/services/environments/environment-name/gateways/" + } + ] + }, + "security": { + "strategyRef": { + "ref": "/security/strategies/balanced_default" + }, + "waf": { + "isEnabled": true + } + }, + "backend": { + "workloadGroups": { + "servers": { + "uris": { + "https://test-01.example.com": { + }, + "https://test-02.example.com": { + } + } + } + } + }, + "configSnippets": { + "uriSnippets": [ + { + "directives": [ + { + "directive":"app_protect_security_log", + "args": ["/etc/controller-agent/configurator/auxfiles/log-default.json", "/var/log/app_protect/security.log"] + } + ] + } + ] + } + } +} +``` + +### Add Location of User-Defined Signature Definition File + +When using [Bring Your Own WAF Policy]({{< relref "/controller/app-delivery/security/concepts/bring-your-own-policy" >}}) in NGINX Controller, you can define a URI Snippet for a Gateway API to define the location for your User-Defined Signature Definition file. The User-Defined Signature can then be referenced in the custom NGINX App Protect WAF policy that you use for your Components. + +{{< note >}} +The file that contains the signature definition must already exist on your NGINX App Protect WAF instances. For more information regarding User-Defined Signatures, refer to the [NGINX App Protect WAF Configuration Guide](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#user-defined-signatures). +{{< /note >}} + +The following example adds a URI snippet to the Gateway API definition that provides the location of the User-Defined Signature Definition file. 
+ +```json +{ + "metadata": { + "name": "" + }, + "desiredState": { + "configSnippets": { + "httpSnippet": { + "directives": [ + { + "directive": "app_protect_user_defined_signatures", + "args": ["app_protect_user_defined_signature_def_01"] + } + ] + } + }, + "ingress": { + "uris": { + "": {} + }, + "placement": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/unspecified/instances/" + } + ] + } + } + } +} + +``` + +### Harden Security using Fail-Closed + +Setting NGINX App Protect to "fail-closed" drops application traffic when certain conditions exist. This setting lets you err on the side of greater security as opposed to convenience, providing better protection for your applications when NGINX App Protect is not available. + +The example below adds HTTP Snippets to the Gateway that set the following NGINX App Protect directives to `drop`, or "fail-closed": + +- `app_protect_failure_mode_action` +- `app_protect_compressed_requests_action` +- `app_protect_request_buffer_overflow_action` + +Refer to the [NGINX App Protect Configuration Guide](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#global-directives) for more information about these directives and the conditions to which each applies. + +```json +{ + "metadata": { + "name": "gateway-name" + }, + "desiredState": { + "configSnippets": { + "httpSnippet": { + "directives": [ + { + "directive": "app_protect_failure_mode_action", + "args": ["drop"] + }, + { + "directive": "app_protect_compressed_requests_action", + "args": ["drop"] + }, + { + "directive": "app_protect_request_buffer_overflow_action", + "args": ["drop"] + } + ] + } + }, + "ingress": { + "uris": { + "http://example.com:8000": {} + }, + "placement": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/unspecified/instances/" + } + ] + } + } + } +} + +``` + +## Tuning WAF Performance Usage Examples + +{{< caution >}} +The examples provided here are intended for demonstration purposes only. 
+We strongly recommend verifying Snippets in a lab environment before making any changes in production. +{{< /caution >}} + +## Set the Memory and CPU Threshold Values + +This example adds an HTTP Snippet to a Gateway to control the memory and CPU threshold values which determine when NGINX App Protect enters and exits failure mode. + +In *failure mode*, App Protect WAF stops processing app traffic. Traffic is either dropped or passed through, as determined by the `app_protect_failure_mode_action` directive. + +The example below directs NGINX App Protect WAF to enter failure mode when memory utilization or CPU utilization reaches 85% and to exit failure mode when memory or CPU utilization drops to 60%. + +```json +{ + "metadata": { + "name": "" + }, + "desiredState": { + "configSnippets": { + "httpSnippet": { + "directives": [ + { + "directive": "app_protect_physical_memory_util_thresholds", + "args": ["high=85 low=60"] + }, + { + "directive": "app_protect_cpu_thresholds", + "args": ["high=85 low=60"] + } + ] + } + }, + "ingress": { + "uris": { + "http://example.com:8000": {} + }, + "placement": { + "instanceRefs": [ + { + "ref": "/infrastructure/locations/unspecified/instances/" + } + ] + } + } + } +} +``` + +{{< versions "3.22" "latest" "adcvers" >}} diff --git a/content/controller/app-delivery/security/concepts/what-is-waf.md b/content/controller/app-delivery/security/concepts/what-is-waf.md new file mode 100644 index 000000000..96a7e120f --- /dev/null +++ b/content/controller/app-delivery/security/concepts/what-is-waf.md @@ -0,0 +1,65 @@ +--- +description: Overview of the App Security module's WAF feature. +docs: DOCS-483 +doctypes: +- concept +- reference +tags: +- docs +title: About App Security +toc: true +weight: 100 +--- + +## Overview + +The App Security Add-on for F5 NGINX Controller ADC lets you protect your applications with a web applications firewall (WAF). 
The WAF protects your apps from a variety of application layer attacks such as [cross-site scripting (XSS)](https://www.f5.com/services/resources/glossary/cross-site-scripting-xss-or-css), [SQL injection](https://www.f5.com/services/resources/glossary/sql-injection), and [cookie poisoning](https://www.f5.com/services/resources/glossary/cookie-poisoning), among others. + +A WAF protects your web apps by filtering, monitoring, and blocking any malicious HTTP/S traffic traveling to the web application, and prevents any unauthorized data from leaving the app. It does this by adhering to a set of policies that help determine what traffic is malicious and what traffic is safe. Just as a proxy server acts as an intermediary to protect the identity of a client, a WAF operates in similar fashion but in the reverse—called a reverse proxy—acting as an intermediary that protects the web app server from a potentially malicious client. + +{{< see-also >}} To learn more about what a WAF is and how it works, check out the F5 DevCentral video: [What is a Web Application Firewall?](https://www.youtube.com/watch?v=p8CQcF_9280){{< /see-also >}} + +  + +## How it works + +App Security on NGINX Controller provides an app‑centric self‑service model to address the security needs of modern apps. + +The App Security add-on uses the NGINX App Protect Web Application Firewall (NGINX App Protect WAF) enforcement engine on the data path (data plane). +When you enable WAF on an app component using NGINX Controller, a security policy (sets of security controls and enforcement logic) is deployed and applied to configured NGINX App Protect instances that process traffic for the app component. + +NGINX App Protect WAF inspects incoming traffic as specified in the Security Policy to identify potential threats. When malicious traffic is suspected or blocked, the NGINX Controller Analytics module logs security events and metrics. 
These are then included in the NGINX Controller Threat Visibility and Analytics reporting. + +{{< see-also >}}To learn more, read the [Threat Visibility and Analytics](https://www.nginx.com/blog/threat-visibility-analytics-nginx-controller-app-security/) blog post on [nginx.com](https://nginx.com).{{< /see-also >}} + +{{< img src="/ctlr/img/cas-overview.png" title="" alt="Controller App Security Overview Image" width="75%">}} + +## Security Policy + +In NGINX Controller, the Security Policy contains an NGINX App Protect WAF policy. The NGINX App Protect WAF policy has security controls and settings in a declarative JSON format. The Security Policy defines the rules and settings for application traffic inspection, detection of malicious traffic, and handling violations when they occur. For more about creating, updating, or deleting Security Policies, see the [Policies API Reference](https://docs.nginx.com/nginx-controller/api/ctlr-adc-api/#operation/listPolicies). + +When enabling WAF to protect your Apps, you can either add your own custom Security Policy or use the default Security Policy. + +## Security Strategy + +A Security Strategy is a logical container for multiple Security Policies. In a Security Strategy, you can reference a Security Policy that represents a security risk profile. For example, you can map low- or high-risk security profiles to different Security Strategies as you deem fit for your Apps' specific use case or organizational needs. + +When you enable security on the App Component, you can specify the Security Strategy to protect it. You can use the same Security Strategy across multiple app components. The Security Policy referenced in the Security Strategy detects and protects against malicious traffic to the App Component. + +- **App Component** references **Security Strategy**; +- **Security Strategy** references **Security Policy**. 
+ +For more about creating, updating, or deleting Security Policies, see the [Strategies API Reference](https://docs.nginx.com/nginx-controller/api/ctlr-adc-api/#tag/Strategies). + +You can use a custom Security Strategy to protect your Apps, or you can use NGINX Controller's default Security Strategy, which contains a pre-defined WAF policy. + +{{< note >}} + +The `/services/strategies/balanced_default` endpoint was replaced by `/security/strategies/balanced_default` in NGINX Controller ADC v3.18. + +- Specify the `StrategyRef` setting with `/security/strategies/balanced_default` instead of `/services/strategies/balanced_default`. + +Refer to the AskF5 knowledge base article [K02089505](https://support.f5.com/csp/article/K02089505) for more information. + +{{< /note >}} + diff --git a/content/controller/app-delivery/security/tutorials/_index.md b/content/controller/app-delivery/security/tutorials/_index.md new file mode 100644 index 000000000..5dc3298fd --- /dev/null +++ b/content/controller/app-delivery/security/tutorials/_index.md @@ -0,0 +1,8 @@ +--- +aliases: +- services/apps/security/tutorials/_index.md +description: How to deploy and configure the App Security module +title: Manage App Security +weight: 200 +url: /nginx-controller/app-delivery/security/tutorials/ +--- diff --git a/content/controller/app-delivery/security/tutorials/add-app-security-with-waf.md b/content/controller/app-delivery/security/tutorials/add-app-security-with-waf.md new file mode 100644 index 000000000..bb01665e2 --- /dev/null +++ b/content/controller/app-delivery/security/tutorials/add-app-security-with-waf.md @@ -0,0 +1,273 @@ +--- +description: How to add F5 NGINX Controller App Security to your applications. 
+docs: DOCS-484 +doctypes: +- concept +- reference +tags: +- task +title: Manage App Security +toc: true +weight: 100 +--- + +## Overview + +You can use the App Security add-on for F5 NGINX Controller ADC to enable Web Application Firewall (WAF) capabilities to protect your applications. WAF lets you flag or block suspicious requests or attacks. WAF can be added to individual app components. + + +## Before You Begin + +Before proceeding with this guide, complete the following tasks. +{{}}These steps may need to be completed by a user with admin permissions.{{}} + +1. [Add an NGINX App Protect instance]({{< relref "/controller/infrastructure/instances/add-nap-instance.md" >}}) to NGINX Controller. + +In addition, the following resources must exist in order to complete the steps in this topic: + +- [Environment]({{< relref "/controller/services/manage-environments.md" >}}) +- [Gateway]({{< relref "/controller/services/manage-gateways.md" >}}) +- [Certs]({{< relref "/controller/services/manage-gateways.md" >}}) (required if your Components use HTTPS) +- [App and Component(s)]({{< relref "/controller/app-delivery/manage-apps.md" >}}) + +## Enable WAF for a Component using the Default Security Strategy + +To enable WAF functionality for Application Security using the default security strategy, send a POST or PUT request to the Components endpoint, with a JSON object similar to the following: + +```json + "security": { + "waf": { + "isEnabled": true + } + } +``` + +{{}}You need READ access to the `/security/strategies/` API path to enable WAF on a component. 
By default, only users with an admin role have full access to all API endpoint resources.{{}} + +This JSON object should be added to the Component endpoint similar to the following example: + +```json +{ + "metadata": { + "name": "secure", + "displayName": "protected web server", + "description": "ProtectedWeb Server", + "tags": [ + "dev", + "protected" + ] + }, + "desiredState": { + "ingress": { + "gatewayRefs": [ + { + "ref": "/services/environments/dev/gateways/dev-gw" + } + ], + "uris": { + "/secure": { + "matchMethod": "PREFIX" + } + } + }, + "security": { + "strategyRef": { + "ref": "/security/strategies/balanced_default" + }, + "waf": { + "isEnabled": true + } + }, + "backend": { + "ntlmAuthentication": "DISABLED", + "preserveHostHeader": "DISABLED", + "workloadGroups": { + "farm": { + "locationRefs": [ + { + "ref": "/infrastructure/locations/unspecified" + } + ], + "loadBalancingMethod": { + "type": "ROUND_ROBIN" + }, + "uris": { + "http://{{workload-1}}:8080": { + "isBackup": false, + "isDown": false, + "isDrain": false, + "resolve": "DISABLED" + }, + "http://{{workload-2}}:8080": { + "isBackup": false, + "isDown": false, + "isDrain": false, + "resolve": "DISABLED" + }, + "http://{{workload-3}}:8080": { + "isBackup": false, + "isDown": false, + "isDrain": false, + "resolve": "DISABLED" + }, + "http://{{workload-4}}:8080": { + "isBackup": false, + "isDown": false, + "isDrain": false, + "resolve": "DISABLED" + } + } + } + } + }, + "logging": { + "errorLog": "ENABLED", + "accessLog": { + "state": "DISABLED", + "format": "" + } + } + } +} +``` + +## Enable WAF for a Component Using Your Own NGINX App Protect WAF Policy + +Instead of using NGINX Controller's default policy for WAF, you can [bring your own NGINX App Protect Policy]({{< relref "/controller/app-delivery/security/concepts/bring-your-own-policy.md" >}}) for use in a Security Strategy to protect your app components. 
+ +To do so, you first need to upload your NGINX App Protect WAF declarative JSON policy to the Security Policy endpoint and reference it in a Security Strategy. Then, you can reference the Security Strategy in the Component where you are enabling WAF. + +### Upload your NGINX App Protect WAF Policy + +To upload your NGINX App Protect declarative JSON Policy to NGINX Controller, use an HTTP client like cURL and send a `PUT` request to the [Security Policy REST API}(https://docs.nginx.com/nginx-controller/api/ctlr-adc-api/) +The JSON object should be similar to the example below: + +```json +{ + "metadata": { + "name": "yourPolicyName", + "displayName": "App Protect Policy", + "description": "my special NAP policy", + "tags": ["test1", "test2"] + }, + "desiredState": { + "content": {"policy": {"name": "/Common/yourPolicyName", "template": {"name": "POLICY_TEMPLATE_NGINX_BASE"}, "applicationLanguage": "utf-8", "enforcementMode": "blocking", "signatures": [{"signatureId": 123458888, "enabled": false}, {"signatureId": 200000098, "enabled": false}, {"signatureId": 200001475, "enabled": false}, {"signatureId": 200002595, "enabled": false}], "bot-defense": {"settings": {"isEnabled": false}}, "headers": [{"name": "*", "type": "wildcard", "decodeValueAsBase64": "disabled"}, {"name": "*-bin", "type": "wildcard", "decodeValueAsBase64": "required"}, {"name": "Referer", "type": "explicit", "decodeValueAsBase64": "disabled"}, {"name": "Authorization", "type": "explicit", "decodeValueAsBase64": "disabled"}, {"name": "Transfer-Encoding", "type": "explicit", "decodeValueAsBase64": "disabled"}], "cookies": [{"name": "*", "type": "wildcard", "decodeValueAsBase64": "disabled"}], "parameters": [{"name": "*", "type": "wildcard", "decodeValueAsBase64": "disabled"}]}} + } +} +``` + +### Create or Update a Security Strategy with a BYO NGINX App Protect WAF Policy + +You can create or update a Security Strategy that references a BYO NGINX App Protect WAF policy by sending a `PUT` request to 
the [Strategies REST API](https://docs.nginx.com/nginx-controller/api/ctlr-adc-api/) endpoint.
+
+The JSON object should be similar to the example below:
+
+```json
+
+{
+  "metadata": {
+    "name": "yourSecStrategyName",
+    "displayName": "Security Strategy",
+    "description": "my special security strategy",
+    "tags": [
+      "tag1",
+      "tag2"
+    ]
+  },
+  "desiredState": {
+    "content": {
+      "securityPolicyRef": "/security/policies/yourPolicyName"
+    }
+  }
+}
+
+```
+
+### Add a BYO NGINX App Protect WAF policy to an App Component
+
+To add your BYO NGINX App Protect Policy to your App(s), you need to add a reference to the Security Strategy that contains the policy to your App Component.
+
+To do so, send a `PUT` request to the [Components REST API](https://docs.nginx.com/nginx-controller/api/ctlr-adc-api/#tag/Components) endpoint.
+
+The JSON object should be similar to the example below:
+
+```json
+
+  "security": {
+    "strategyRef": {
+      "ref": "/security/strategies/"
+    },
+    "waf": {
+      "isEnabled": true
+    }
+  }
+
+```
+
+{{< note >}}
+
+The following WAF security parameters are not supported in App Components that reference a custom Security Strategy:
+
+- `isMonitorOnly`
+- `signatureOverrides`
+
+The preceding parameters are supported by NGINX Controller's default policy for WAF.
+
+{{< /note >}}
+
+ 
+
+## Verify that WAF is Enabled
+
+Complete the tasks in this section to verify that the Web Application Firewall is active and processing traffic.
+
+To verify that WAF has been enabled by NGINX Controller App Security to protect your app component, send an HTTP GET request to the app component. 
+
+**Example using NGINX Controller's default policy**: GET: `https://[gateway FQDN]/?a=
+```
+
+To block requests with XSS attempts, edit rules 941160 and 941320 in the CRS’s XSS Application Attack rule set (**REQUEST-941-APPLICATION-ATTACK-XSS.conf**) by adding `REQUEST_URI` at the start of the variables list for each rule:
+
+```nginx
+SecRule REQUEST_URI|REQUEST_COOKIES|!REQUEST_COOKIES:/__utm/ ...
+```
+
+Reload the NGINX Plus configuration to read in the revised rule set:
+
+```none
+sudo nginx -s reload
+```
+
+When we rerun Nikto, it reports only four items, and they are false positives for our application.
+
+```none
+$ perl program/nikto.pl -h localhost
++ The anti-clickjacking X-Frame-Options header is not present.
++ The X-XSS-Protection header is not defined. This header can hint to the user agent to protect against some forms of XSS
++ The X-Content-Type-Options header is not set. This could allow the user agent to render the content of the site in a different fashion to the MIME type
++ No CGI Directories found (use '-C all' to force check all possible dirs)
++ /smg_Smxcfg30.exe?vcc=3560121183d3: This may be a Trend Micro Officescan 'backdoor'.
++ 7435 requests: 0 error(s) and 4 item(s) reported on remote host
+```
+
+
+
+## Limitations
+
+Inspecting the response body is not supported, so rules that do so have no effect.
+
+
+
+## Conclusion
+
+We used the OWASP ModSecurity Core Rule Set to protect our web application against a wide range of generic attacks and saw how the CRS blocks malicious requests generated by the Nikto scanning tool.
+
+For information about another supported ModSecurity rule set, see [Using the ModSecurity Rules from Trustwave SpiderLabs with the NGINX ModSecurity WAF]({{< relref "nginx-plus-modsecurity-waf-trustwave-spiderlabs-rules.md" >}}). 
+
+
+
+## Resources
+
+- [OWASP ModSecurity Core Rule Set (CRS)](https://owasp.org/www-project-modsecurity-core-rule-set/)
+- [Nikto scanning tool](https://github.com/sullo/nikto)
+- [ModSecurity Reference Manual](https://github.com/SpiderLabs/ModSecurity/wiki/Reference-Manual-v2.x#ModSecurityreg_Reference_Manual)
diff --git a/content/modsec-waf/admin-guide/nginx-plus-modsecurity-waf-trustwave-spiderlabs-rules.md b/content/modsec-waf/admin-guide/nginx-plus-modsecurity-waf-trustwave-spiderlabs-rules.md
new file mode 100644
index 000000000..a873d4e58
--- /dev/null
+++ b/content/modsec-waf/admin-guide/nginx-plus-modsecurity-waf-trustwave-spiderlabs-rules.md
@@ -0,0 +1,145 @@
+---
+docs: DOCS-728
+title: Using the ModSecurity Rules from Trustwave SpiderLabs with the NGINX ModSecurity
+  WAF
+toc: true
+weight: 300
+---
+
+{{< important >}}
+{{% modsec-eol-notice %}}
+{{< /important >}}
+
+This chapter explains how to configure the Commercial ModSecurity Rules from Trustwave SpiderLabs for use with the F5 NGINX ModSecurity web application firewall (WAF).
+
+
+
+## Overview
+
+NGINX Plus Release 12 and later supports the [NGINX ModSecurity WAF](https://www.nginx.com/products/nginx/modules/nginx-waf/), which protects web applications against SQL Injection (SQLi), Remote Code Execution (RCE), Local File Include (LFI), cross-site scripting (XSS), and many other types of attack.
+
+The Commercial ModSecurity Rules from Trustwave SpiderLabs (which we refer to as the Trustwave Rules in this chapter) complement the [Open Web Application Security Project Core Rule Set](https://www.owasp.org/index.php/Category:OWASP_ModSecurity_Core_Rule_Set_Project) (OWASP CRS) with protection against specific attacks for many common applications including ASP.NET, Joomla, and WordPress. Additionally, the Trustwave SpiderLabs Rules provide IP reputation along with other capabilities, and are updated daily. 
+ +This chapter builds on the basic configuration created in the [Installing the NGINX ModSecurity WAF]({{< relref "nginx-plus-modsecurity-waf-installation-logging.md" >}}) chapter, showing how to configure the Trustwave Rules to protect the demo web application configured in that chapter. + +The NGINX ModSecurity WAF also supports the OWASP CRS as described in [Using the OWASP CRS with the NGINX ModSecurity WAF]({{< relref "nginx-plus-modsecurity-waf-owasp-crs.md" >}}). + + + +## Prerequisites + +The NGINX ModSecurity WAF is available to NGINX Plus customers as a downloaded dynamic module at an additional cost. You can [try the NGINX ModSecurity WAF free for 30 days](https://www.nginx.com/free-trial-request/). To purchase or add the NGINX ModSecurity WAF to an existing NGINX Plus subscription, [contact the NGINX sales team](https://www.nginx.com/contact-sales/). + +You must purchase the Trustwave Rules directly from Trustwave SpiderLabs. + +As noted above, this chapter builds on [Installing the NGINX ModSecurity WAF]({{< relref "nginx-plus-modsecurity-waf-installation-logging.md" >}}) and assumes you have followed the instructions there to configure both the demo application and NGINX Plus as a reverse proxy. + + + +## Configuring the Trustwave SpiderLabs Rules + +Purchasing the Trustwave Rules gives you access to the ModSecurity Dashboard, which is a web portal where you can customize the Trustwave Rules on individual instances of the NGINX ModSecurity WAF (and other ModSecurity installations). The Dashboard simplifies configuration compared to the OWASP CRS, in two ways: + +- You don’t need to download rules onto individual NGINX Plus instances, because the NGINX ModSecurity WAF dynamic module downloads them automatically when the `SecRemoteRules` directive is included in the NGINX ModSecurity WAF configuration (see [Step 3](#waf-trustwave_configure-your-server) in the next section). 
+- You enable and disable rules -- a significant part of the configuration process -- with a GUI on the Dashboard instead of in NGINX ModSecurity WAF configuration files. + +To configure the Trustwave Rules for the demo application, first create a profile (or use the default one) that includes selected rules for protecting the application. The following instructions use the Dashboard's Configuration Wizard to create a profile. You then modify the local NGINX ModSecurity WAF configuration to make the NGINX ModSecurity WAF dynamic module download and apply the rules. + +Detailed instructions for using the Dashboard are not provided here. For more information, log in to the Dashboard and access the Dashboard FAQ. + + + +### Using the Configuration Wizard + +To configure the Trustwave Rules for the demo application, perform the following steps: + +1. Log in to the ModSecurity Dashboard and start the Configuration Wizard. + +2. Create a profile, enabling rules that are relevant for your application. None of the existing rules actually apply to our demo application, but for the purposes of this step select the WordPress‑related rules. You can also enable additional options, such as IP reputation. + + +3. At the **Configure your server** step, the Wizard presents the [`SecRemoteRules`](https://github.com/SpiderLabs/ModSecurity/wiki/Reference-Manual-(v2.x)#SecRemoteRules) directive that must be added to the NGINX ModSecurity WAF configuration, similar to this: + + ```nginx + SecRemoteRules https:// + ``` + + Here, the `SecRemoteRules` directive configures the NGINX ModSecurity WAF to download rules from the remote server, represented by the ``, using the provided ``. 
+ + The Wizard does not provide an interface for adding the directive, so you need to edit **/etc/nginx/modsec/main.conf** manually and add the `SecRemoteRules` directive presented by the Wizard (we created the **main.conf** file in Step 4 of [Protecting the Demo Web Application]({{< relref "nginx-plus-modsecurity-waf-installation-logging.md#protecting-the-demo-web-application" >}}) in the installation chapter). Comment out any other rules that might already exist in the file, such as the `SecRule` directive defined in that step. + + ```nginx + # Include the recommended configuration + Include "/etc/nginx/modsec/modsecurity.conf" + SecRemoteRules https:// + ``` + +4. By default, the Trustwave Rules only detect malicious requests and don’t block them. To block the requests, add the following lines to **/etc/nginx/modsec/main.conf** below the `SecRemoteRules` directive you added in the previous step: + + ```nginx + SecDefaultAction "phase:2,log,auditlog,deny,status:403" + SecDefaultAction "phase:1,log,auditlog,deny,status:403" + ``` + + The [`SecDefaultAction`](https://github.com/SpiderLabs/ModSecurity/wiki/Reference-Manual-(v2.x)#SecDefaultAction) directive defines the default list of actions for the rules, with the `deny` action blocking malicious requests and returning status code `403`. + +5. Reload the NGINX Plus configuration: + + ```none + sudo nginx -s reload + ``` + + Reloading takes time as the rules are being downloaded from the remote server. + +6. Once the Wizard reports that NGINX Plus downloaded the rules, you can close the Wizard and start testing the rules. + + + +### Testing the Rules + +In the [Using the OWASP CRS with the NGINX ModSecurity WAF]({{< relref "nginx-plus-modsecurity-waf-owasp-crs.md" >}}) chapter, we use the Nikto scanning tool to test how the CRS blocks malicious requests. You cannot use a similar approach to test the Trustwave Rules, because they are specific rules that do not detect the generic attacks sent by Nikto. 
+ +The Dashboard describes each Trustwave ModSecurity Rule. You can use that information to test how the rule behaves, by constructing and sending NGINX Plus malicious requests that trigger the rules. + + + +## Caveats for the `SecRemoteRules` Directive + +Currently, the only way to download the Trustwave Rules is with the `SecRemoteRules` directive. While the directive simplifies the process of getting the rules onto an instance of NGINX Plus where the NGINX ModSecurity WAF is dynamically loaded, the following caveats apply: + +- Every time you reload the NGINX Plus configuration or restart NGINX Plus, the rules are freshly downloaded from a remote server. To control what happens when the download fails, for example when connectivity to the remote server is lost, include the [`SecRemoteRulesFailAction`](https://github.com/SpiderLabs/ModSecurity/wiki/Reference-Manual-(v2.x)#SecRemoteRulesFailAction) directive in the NGINX ModSecurity WAF configuration. The `SecRemoteRulesFailAction` directive must appear above the `SecRemoteRules` directives in a NGINX ModSecurity WAF configuration file. + + The directive supports two values: + + - `Abort` forces the reload or restart of NGINX Plus to fail when the download of rules fails + - `Warn` lets NGINX Plus reload or restart successfully but with _no remote rules_ enabled + +- Downloading the rules takes some time, which delays the reload or restart operation. + +- Each `SecRemoteRules` definition leads to a separate download, further increasing the reload/restart time. To avoid that, try to minimize the number of `SecRemoteRules` definitions. 
Note that even if you define `SecRemoteRules` only in one file (such as the **/etc/nginx/modsec/main.conf** file modified in [Step 3](#waf-trustwave_configure-your-server) above), each time you read this file into NGINX Plus configuration using the [`modsecurity_rules_file`](https://github.com/SpiderLabs/ModSecurity-nginx#modsecurity_rules_file) directive (as in the **/etc/nginx/conf.d/proxy.conf** file created in [Configuring NGINX Plus as a Reverse Proxy]({{< relref "nginx-plus-modsecurity-waf-installation-logging/#configuring-nginx-plus-as-a-reverse-proxy" >}}) in the installation chapter), the NGINX ModSecurity WAF treats it as a separate definition. + +- Merging rules from different NGINX Plus configuration contexts ([`http {}`](https://nginx.org/en/docs/http/ngx_http_core_module.html#http), [`server {}`](https://nginx.org/en/docs/http/ngx_http_core_module.html#server), [`location {}`](http://nginx.org/en/docs/http/ngx_http_core_module.html#location)) also adds time to the reload/restart operation and consumes a lot of CPU, especially for a huge rule set such as the Trustwave Rules. In addition to minimizing the number of `SecRemoteRules` definitions, try to include all rule definitions in a single context. + +The Trustwave rule set contains more than 16,000 rules for protecting various applications. The more rules there are, the worse the NGINX ModSecurity WAF performs, so it is crucial that you enable only rules that are relevant for your application. + + + +## Limitations + +Inspecting the response body is not supported, so rules that do so have no effect. + + + +## Conclusion + +We configured Commercial ModSecurity Rules from Trustwave SpiderLabs to protect our application against WordPress‑related attacks. We also reviewed caveats for the `SecRemoteRules` directive. + +For information about using the OWASP CRS with the NGINX ModSecurity WAF, see [Using the OWASP CRS with the NGINX ModSecurity WAF]({{< relref "nginx-plus-modsecurity-waf-owasp-crs.md" >}}). 
+ + + +## Resources + +- [Commercial ModSecurity Rules from Trustwave SpiderLabs](http://modsecurity.org) +- [ModSecurity Dashboard](https://modsecurity.org/) +- [ModSecurity Reference Manual](https://github.com/SpiderLabs/ModSecurity/wiki/Reference-Manual-(v2.x)#ModSecurityreg_Reference_Manual) diff --git a/content/modsec-waf/technical-specs.md b/content/modsec-waf/technical-specs.md new file mode 100644 index 000000000..6342fde0e --- /dev/null +++ b/content/modsec-waf/technical-specs.md @@ -0,0 +1,78 @@ +--- +docs: DOCS-729 +title: Technical Specifications +toc: true +weight: 300 +--- + +{{< important >}} +{{% modsec-eol-notice %}} +{{< /important >}} + +F5 NGINX ModSecurity WAF is a module for NGINX Plus. + +## Supported Distributions + +### Alpine Linux + +- 3.9 (x86_64) +- 3.10 (x86_64) +- 3.11 (x86_64) + +### Amazon Linux + +- 2018.03+ (x86_64) + +### Amazon Linux 2 + +- LTS (x86_64) + +### CentOS + +- 6.5+ (x86_64) +- 7.4+ (x86_64) +- 8.0+ (x86_64) + +### Debian + +- 9 (x86_64) +- 10 (x86_64) + +### FreeBSD + +- 11.2+ (amd64, x86_64) +- 12.0+ (amd64, x86_64) + +### Oracle Linux + +- 6.5+ (x86_64) +- 7.4+ (x86_64) + +### Red Hat Enterprise Linux + +- 6.5+ (x86_64) +- 7.4+ (x86_64) +- 8.0+ (x86_64) + +### SUSE Linux Enterprise Server + +- 12 (x86_64) +- 15 (x86_64) + +### Ubuntu + +- 16.04 LTS (x86_64) +- 18.04 LTS (x86_64) +- 19.10 (x86_64) +- 20.04 (x86_64) + +**Note:** CentOS/Oracle/Red Hat Enterprise Linux 6.5 users please see [this advisory](https://www.f5.com/company/blog/nginx/using-nginx-plus-with-selinux) when upgrading to version 6.6. 
+
+**Note:** i386 and x86_64 support only, no aarch64 or ppc64le
+
+## Supported Deployment Environments
+
+- Bare metal
+- Container
+- Public cloud: AWS, Google Cloud Platform, Microsoft Azure
+- Virtual Machine
diff --git a/content/nap-dos/_index.md b/content/nap-dos/_index.md
new file mode 100644
index 000000000..058171b30
--- /dev/null
+++ b/content/nap-dos/_index.md
@@ -0,0 +1,8 @@
+---
+description: "F5 NGINX App Protect DoS provides behavioral DoS detection and mitigation.\n\nRequest
+  your [free 30-day trial](https://www.nginx.com/free-trial-request/) today. \n"
+title: F5 NGINX App Protect DoS
+url: /nginx-app-protect-dos/
+cascade:
+  logo: "NGINX-App-Protect-DoS-product-icon.svg"
+---
diff --git a/content/nap-dos/deployment-guide/_index.md b/content/nap-dos/deployment-guide/_index.md
new file mode 100644
index 000000000..ff74352e2
--- /dev/null
+++ b/content/nap-dos/deployment-guide/_index.md
@@ -0,0 +1,10 @@
+---
+description: Learn how to deploy NGINX App Protect DoS.
+menu:
+  docs:
+    parent: NGINX App Protect DoS Documentation.
+title: Deployment Guide
+weight: 100
+url: /nginx-app-protect-dos/deployment-guide/
+---
+
diff --git a/content/nap-dos/deployment-guide/installing-nginx-plus-with-dos-and-waf-on-amazon-web-services.md b/content/nap-dos/deployment-guide/installing-nginx-plus-with-dos-and-waf-on-amazon-web-services.md
new file mode 100644
index 000000000..01c590f28
--- /dev/null
+++ b/content/nap-dos/deployment-guide/installing-nginx-plus-with-dos-and-waf-on-amazon-web-services.md
@@ -0,0 +1,198 @@
+---
+description: Install F5 NGINX Plus, NGINX App Protect WAF + DoS on Amazon Web Services
+  (AWS), to provide sophisticated Layer 7 load balancing, modern app security solution,
+  behavioral DoS detection and mitigation that works seamlessly in DevOps environments
+  for your apps running on CentOS, RHEL, Debian and Ubuntu Linux OS. 
+docs: DOCS-1204
+doctypes:
+- task
+title: NGINX App Protect WAF + DoS AMIs on Amazon EC2
+toc: true
+weight: 110
+---
+
+NGINX, Inc. participates in the Amazon Web Services (AWS) Partner Network as a Standard Technology Partner. We offer Amazon Machine Images (AMIs) for use in the Amazon Elastic Compute Cloud (EC2), available at the AWS Marketplace for several operating systems, including Amazon Linux, Red Hat Enterprise Linux, and Ubuntu.
+
+The AMIs contain a combination of the following components:
+
+- Latest version of [F5 NGINX Plus](https://www.f5.com/products/nginx/nginx-plus), optimized for use on Amazon EC2
+
+- Latest version of [NGINX App Protect DoS](https://docs.nginx.com/nginx-app-protect-dos/), optimized for use on Amazon EC2
+- Latest version of [NGINX App Protect WAF](https://docs.nginx.com/nginx-app-protect-waf/), optimized for use on Amazon EC2
+- Pre-packaged software for building highly available (HA) NGINX Plus configurations
+
+## Installing the NGINX Plus NGINX App Protect WAF + DoS
+
+To quickly set up an environment with NGINX Plus, NGINX App Protect WAF and NGINX App Protect DoS on AWS:
+
+1. Follow the instructions in [Getting Started with Amazon EC2 Linux Instances](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EC2_GetStarted.html) to sign up on AWS and get more information about EC2 itself.
+2. Proceed to the product page for the appropriate AMI at the AWS Marketplace, and launch the AMI. 
+ + - [NGINX Plus with NGINX App Protect DoS – RHEL 7 Linux AMI HVM](https://aws.amazon.com/marketplace/pp/prodview-bjdboufufnb7g?sr=0-4&ref_=beagle&applicationId=AWSMPContessa) + + - [NGINX Plus with NGINX App Protect DoS – RHEL8 Linux AMI HVM](https://aws.amazon.com/marketplace/pp/prodview-e6bifer7o6uzm?sr=0-13&ref_=beagle&applicationId=AWSMPContessa) + + - [NGINX Plus with NGINX App Protect DoS – CentOS 7 Linux AMI HVM](https://aws.amazon.com/marketplace/pp/prodview-deeny2oe7izti?sr=0-12&ref_=beagle&applicationId=AWSMPContessa) + + - [NGINX Plus with NGINX App Protect DoS – Debian 11 Linux AMI HVM](https://aws.amazon.com/marketplace/pp/prodview-56oveh2rsxsbq?sr=0-2&ref_=beagle&applicationId=AWSMPContessa) + + - [NGINX Plus with NGINX App Protect DoS – Ubuntu 20.04 Linux AMI HVM](https://aws.amazon.com/marketplace/pp/prodview-gsoln2vnsgpr4?sr=0-5&ref_=beagle&applicationId=AWSMPContessa) + + - [NGINX Plus with NGINX App Protect DoS – Ubuntu 22.04 Linux AMI HVM](https://aws.amazon.com/marketplace/pp/prodview-l6f2q2ykrjufy?sr=0-13&ref_=beagle&applicationId=AWSMPContessa) + + - [NGINX Plus with NGINX App Protect WAF + DoS – RHEL 7 Linux AMI HVM](https://aws.amazon.com/marketplace/pp/prodview-jedbygo6xbvto?sr=0-1&ref_=beagle&applicationId=AWSMPContessa) + + - [NGINX Plus with NGINX App Protect WAF + DoS – RHEL 8 Linux AMI HVM](https://aws.amazon.com/marketplace/pp/prodview-6pvnoyr2mp2co?sr=0-18&ref_=beagle&applicationId=AWSMPContessa) + + - [NGINX Plus with NGINX App Protect WAF + DoS – CentOS 7 Linux AMI HVM](https://aws.amazon.com/marketplace/pp/prodview-jedbygo6xbvto?sr=0-1&ref_=beagle&applicationId=AWSMPContessa) + + - [NGINX Plus with NGINX App Protect WAF + DoS – Debian 11 Linux AMI HVM](https://aws.amazon.com/marketplace/pp/prodview-wbyobl7a55vcu?sr=0-3&ref_=beagle&applicationId=AWSMPContessa) + + - [NGINX Plus with NGINX App Protect WAF + DoS – Ubuntu 20.04 Linux AMI 
HVM](https://aws.amazon.com/marketplace/pp/prodview-zhxmqlcoylkca?sr=0-2&ref_=beagle&applicationId=AWSMPContessa) + + Click the **Continue to Subscribe** button to proceed to the **Launch on EC2** page. + +3. Select the type of launch by clicking the appropriate tab (1‑Click Launch, **Manual Launch**, or **Service Catalog**). Choose the desired options for billing, instance size, and so on, and click the Accept Software Terms… button. +4. When configuring the firewall rules, add a rule to accept web traffic on TCP ports 80 and 443 (this happens automatically if you launch from the 1-Click Launch tab). +5. As soon as the new EC2 instance launches, NGINX Plus starts automatically and serves a default **index.html** page. To view the page, use a web browser to access the public DNS name of the new instance. You can also check the status of the NGINX Plus server by logging into the EC2 instance and running this command: + + ```nginx + /etc/init.d/nginx status + ``` + + See [NGINX Plus on the AWS Cloud](https://www.nginx.com/resources/datasheets/nginx-quick-start-guide-for-aws/) deployment guide for details. + +6. 
Verify latest NGINX PLUS / NGINX App Protect DoS / NGINX App Protect WAF packages are installed on EC2 after its first start: + + + Verify NGINX App Protect WAF latest release from is + + installed by comparing with installed version from following command on the EC2 machine + + ```shell + cat /opt/app_protect/VERSION /opt/app_protect/RELEASE + ``` + + Verify NGINX DoS latest release from is + + installed by comparing with installed version from following command on the EC2 machine + + ```shell + admd -v + ``` + + Verify NGINX Plus latest release from is + installed by comparing with installed version from following command on the EC2 machine + + ```shell + nginx -v + ``` + + In case NGINX PLUS / NGINX App Protect DoS / NGINX App Protect WAF packages are not latest release then upgrade the following with these commands: + + + For App Protect DoS solution based on RedHat / CentOS + + ```shell + sudo service nginx stop + sudo yum install app-protect-dos + sudo systemctl start nginx + ``` + + For App Protect DoS solution based on Debian / Ubuntu + + ```shell + sudo service nginx stop + sudo apt-get update + sudo apt-get install app-protect-dos + sudo service nginx start + ``` + + For App Protect WAF solution based on RedHat / CentOS + + ```shell + sudo service nginx stop + sudo yum install app-protect + sudo systemctl start nginx + ``` + + For App Protect WAF solution based on Debian / Ubuntu + + ```shell + sudo service nginx stop + sudo apt-get update + sudo apt-get install app-protect + sudo service nginx start + ``` + +7. If AMI includes [NGINX App Protect WAF](https://docs.nginx.com/nginx-app-protect-waf/) + + To enable NGINX App Protect WAF use the following steps: + + a. Load the NGINX App Protect WAF module on the main context in the `nginx.conf` file: + + ```shell + load_module modules/ngx_http_app_protect_module.so; + ``` + + b. 
Enable NGINX App Protect WAF on an `http/server/location` context in the `nginx.conf` file: + + ```shell + app_protect_enable on; + ``` + + c. Restart the NGINX service: + + ```shell + sudo systemctl restart nginx + ``` + + For more configuration information follow [NGINX App Protect WAF Configuration Guide](https://docs.nginx.com/nginx-app-protect-waf/configuration-guide/configuration/). + + + +8. If AMI includes [NGINX App Protect DoS](https://docs.nginx.com/nginx-app-protect-dos/) + + To enable NGINX App Protect DoS use the following steps: + + a. Load the NGINX App Protect DoS module on the main context in the `nginx.conf` file: + + ```shell + load_module modules/ngx_http_app_protect_dos_module.so; + ``` + + b. Enable NGINX App Protect DoS on an `http/server/location` context in the `nginx.conf` file: + + ```shell + app_protect_dos_enable on; + app_protect_dos_name "App1"; + app_protect_dos_policy_file "/etc/app_protect_dos/BADOSDefaultPolicy.json"; + app_protect_dos_monitor uri=serv:80/; # Assuming server_name "serv" on port 80, with the root path "/" + ``` + + c. Enable the L4 accelerated mitigation feature (for Debian11/Ubuntu20.04/RHEL8) in the `http` context of the `nginx.conf` file: + + ```shell + app_protect_dos_accelerated_mitigation on; + ``` + + d. Restart the NGINX service: + + ```shell + sudo systemctl restart nginx + ``` + + For more configuration information follow [NGINX App Protect DoS Directives and Policy](https://docs.nginx.com/nginx-app-protect-dos/directives-and-policy/learn-about-directives-and-policy/). + + + +## What If I Need Help? + +If you encounter any problems with NGINX Plus configuration, documentation is available at [nginx.org](https://nginx.org/en/docs/) and in the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/admin-guide/installing-nginx/). 
+ +If you encounter any problems with NGINX App Protect DoS configuration, documentation is available at the [NGINX App Protect DoS Troubleshooting Guide](https://docs.nginx.com/nginx-app-protect-dos/troubleshooting-guide/how-to-troubleshoot/). + +If you encounter any problems with NGINX App Protect WAF configuration, documentation is available at the [NGINX App Protect WAF Troubleshooting Guide](https://docs.nginx.com/nginx-app-protect-waf/v4/troubleshooting-guide/troubleshooting/). + + +Customers who purchase an NGINX Plus AMI at the AWS Marketplace are eligible for the AWS support provided by the NGINX, Inc. engineering team. To activate support, submit the [AMI Support Activation](https://www.nginx.com/ami-support-activation/) form (you need your AWS account number). When you request support, we’ll ask you to provide the AWS account number that you registered, along with the IDs of your EC2 instances in some cases. diff --git a/content/nap-dos/deployment-guide/learn-about-deployment.md b/content/nap-dos/deployment-guide/learn-about-deployment.md new file mode 100644 index 000000000..f2b35d160 --- /dev/null +++ b/content/nap-dos/deployment-guide/learn-about-deployment.md @@ -0,0 +1,2444 @@ +--- +description: Learn about F5 NGINX App Protect DoS Deployment. +docs: DOCS-666 +doctypes: +- task +title: NGINX App Protect DoS Deployment +toc: true +weight: 100 +--- + +## Overview + +F5 NGINX App Protect DoS provides behavioral protection against DoS for your web applications.

      +This guide explains how to deploy NGINX App Protect DoS as well as upgrade App Protect DoS. + +## Prerequisites + +NGINX App Protect DoS is available to the customers as a downloadable dynamic module at an additional cost. To purchase or add NGINX App Protect DoS to an existing NGINX Plus subscription, contact the NGINX sales team. + +NGINX Plus Release 24 and later supports NGINX App Protect DoS. + +NGINX App Protect DoS supports the following operating systems: + +- [CentOS 7.4.x and above](#centos-74-installation) (Deprecated starting from NGINX Plus R33) +- [RHEL 7.4.x and above](#rhel-74-installation) (Deprecated starting from NGINX Plus R33) +- [RHEL 8.1.x / Rocky Linux 8 and above](#rhel-8--rocky-linux-8-installation) +- [RHEL 9 and above](#rhel-9-installation) +- [Debian 10 (Buster)](#debian--ubuntu-installation) - (Deprecated starting from NGINX Plus R28) +- [Debian 11 (Bullseye)](#debian--ubuntu-installation) +- [Debian 12 (Bookworm)](#debian--ubuntu-installation) +- [Ubuntu 18.04 (Bionic)](#debian--ubuntu-installation) - (Deprecated starting from NGINX Plus R30) +- [Ubuntu 20.04 (Focal)](#debian--ubuntu-installation) +- [Ubuntu 22.04 (Jammy)](#debian--ubuntu-installation) +- [Ubuntu 24.04 (Noble)](#debian--ubuntu-installation) +- [Alpine 3.15](#alpine-315x--317x--319x-installation) - (Deprecated starting from NGINX Plus R30) +- [Alpine 3.17](#alpine-315x--317x--319x-installation) +- [Alpine 3.19](#alpine-315x--317x--319x-installation) + +The NGINX App Protect DoS package has the following dependencies: + +1. **nginx-plus-module-appprotectdos** - NGINX Plus dynamic module for App Protect DoS +2. **libcurl** - Software library for HTTP access +3. **zeromq4** - Software library for fast, message-based applications +4. **boost** - The free peer-reviewed portable C++ source libraries +5. **openssl** - Toolkit for the Transport Layer Security (TLS) and Secure Sockets Layer (SSL) protocol +6. 
**libelf** - Software library for ELF access + +See the NGINX Plus full list of prerequisites for more details. NGINX App Protect DoS can be installed as a module to an existing NGINX Plus installation or as a complete NGINX Plus with App Protect DoS installation in a clean environment or to a system with NGINX App Protect WAF. + +{{< note >}} + +- gRPC, HTTP/2 and WebSocket protection require active monitoring of the protected service. The directive `app_protect_dos_monitor` is mandatory for the attack to be detected. +- TLS fingerprint feature is not used in CentOS 7.4 and RHEL 7 / UBI 7 due to the old OpenSSL version. The required OpenSSL version is 1.1.1 or higher. +- Monitor directive `app_protect_dos_monitor` with proxy_protocol parameter can not be configured on Ubuntu 18.04. As a result, gRPC and HTTP/2 DoS protection for proxy_protocol configuration is not supported. +{{< /note >}} + +## Platform Security Considerations + +When deploying App Protect DoS on NGINX Plus take the following precautions to secure the platform. This avoids the risk of causing a Denial of Service condition or compromising the platform security. + +- Restrict permissions to the files on the NGINX App Protect DoS platform to user **nginx** and group **nginx**, especially for the sensitive areas containing the configuration. +- Remove unnecessary remote access services on the platform. +- Configure a Syslog destination on the same machine as App Protect DoS and proxy to an external destination. This avoids eavesdropping and [man-in-the-middle](https://en.wikipedia.org/wiki/Man-in-the-middle_attack) attacks on the Syslog channel. + +## CentOS 7.4+ Installation + +1. If you already have NGINX packages in your system, back up your configs and logs: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +2. Create the `/etc/ssl/nginx/` directory: + + ```shell + sudo mkdir -p /etc/ssl/nginx + ``` + +3. 
Log in to the NGINX [Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +4. Copy the above two files to the CentOS server’s `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. + +5. Install prerequisite packages: + + ```shell + sudo yum install ca-certificates epel-release wget + ``` + +6. Add NGINX Plus and NGINX App Protect DoS repository: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-7.4.repo + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-dos-7.repo + ``` + +7. In case of fresh installation, update the repository and install the most recent version of the NGINX Plus App Protect DoS package (which includes NGINX Plus): + + ```shell + sudo yum install app-protect-dos + ``` + + Alternatively, you can use the following command to list available versions: + + ```shell + sudo yum --showduplicates list app-protect-dos + ``` + + Then, install a specific version from the output of command above. For example: + + ```shell + sudo yum install app-protect-dos-27+2.4.0 + ``` + +8. In case of upgrading from previously installed NGINX Plus App Protect DoS package (which includes NGINX Plus): + + ```shell + sudo yum remove nginx-plus + sudo yum install app-protect-dos + sudo systemctl start nginx + ``` + + {{< note >}} Make sure to restore configuration from `/etc/nginx-plus-backup` back to `/etc/nginx-plus`.{{< /note >}} + +9. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: + + ```shell + sudo nginx -v + ``` + +10. Check the NGINX App Protect DoS binary version to ensure that you have the right version installed correctly: + + ```shell + sudo admd -v + ``` + +11. Load the NGINX App Protect DoS module on the main context in the `nginx.conf`: + + ```nginx + load_module modules/ngx_http_app_protect_dos_module.so; + ``` + +12. 
Enable NGINX App Protect DoS on an `http/server/location` context in the `nginx.conf` file: + + ```nginx + app_protect_dos_enable on; + app_protect_dos_name "App1"; + app_protect_dos_monitor uri=serv:80/; # Assuming server_name "serv" on port 80, with the root path "/" + ``` + +13. Configure the SELinux to allow NGINX App Protect DoS: + + a. Using the vi editor, create a file: + + ```shell + vi app-protect-dos.te + ``` + + b. Insert the following contents into the file created above: + + ```shell + module app-protect-dos 2.0; + require { + type unconfined_t; + type unconfined_service_t; + type httpd_t; + type tmpfs_t; + type initrc_t; + type initrc_state_t; + class capability sys_resource; + class shm { associate read unix_read unix_write write }; + class file { read write }; + } + allow httpd_t initrc_state_t:file { read write }; + allow httpd_t self:capability sys_resource; + allow httpd_t tmpfs_t:file { read write }; + allow httpd_t unconfined_service_t:shm { associate read unix_read unix_write write }; + allow httpd_t unconfined_t:shm { associate read write unix_read unix_write }; + allow httpd_t initrc_t:shm { associate read unix_read unix_write write }; + ``` + + c. Run the following chain of commands: + + ```shell + sudo checkmodule -M -m -o app-protect-dos.mod app-protect-dos.te + sudo semodule_package -o app-protect-dos.pp -m app-protect-dos.mod + sudo semodule -i app-protect-dos.pp; + ``` + + If you encounter any issues, refer to the [Troubleshooting Guide]({{< relref "/nap-dos/troubleshooting-guide/how-to-troubleshoot.md" >}}). + + {{< note >}}Additional SELinux configuration may be required to allow NGINX Plus to listen on specific network ports, connect to upstreams, and send syslog entries to remote systems. Refer to the practices outlined in the [Using NGINX and NGINX Plus with SELinux](https://www.nginx.com/blog/using-nginx-plus-with-selinux/) article for details.{{< /note >}} + +14. 
To enable the NGINX/App-Protect-DoS service to start at boot, run the command: + + ```shell + sudo systemctl enable nginx.service + ``` + +15. Start the NGINX service: + + ```shell + sudo systemctl start nginx + ``` + +## RHEL 7.4+ Installation + +1. If you already have NGINX packages in your system, back up your configs and logs: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +2. Create the `/etc/ssl/nginx/` directory: + + ```shell + sudo mkdir -p /etc/ssl/nginx + ``` + +3. Log in to the NGINX [Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +4. Copy the above two files to the CentOS server’s `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. + +5. Install prerequisite packages: + + ```shell + sudo yum install ca-certificates wget + +6. Enable Yum repositories to pull App Protect DoS dependencies: + + If you have a RHEL subscription: + + ```shell + sudo subscription-manager repos --enable rhel-*-optional-rpms \ + --enable rhel-*-extras-rpms \ + --enable rhel-ha-for-rhel-*-server-rpms + yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + ``` + + If you don’t have a RHEL subscription, you can pull the dependencies from the CentOS repository: Create a new repository centos.repo in `/etc/yum.repos.d/` with the content: + + ```shell + [centos] + name=CentOS-7 + baseurl=http://ftp.heanet.ie/pub/centos/7/os/x86_64/ + enabled=1 + gpgcheck=1 + gpgkey=http://ftp.heanet.ie/pub/centos/7/os/x86_64/RPM-GPG-KEY-CentOS-7 + [epel] + name=epel packages for CentOS/RHEL 7 + baseurl=https://dl.fedoraproject.org/pub/epel/7/x86_64 + enabled=1 + gpgcheck=1 + gpgkey=https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7 + [extras] + name=extras packages for CentOS/RHEL 7 + 
mirrorlist=http://mirrorlist.centos.org/?release=7&arch=x86_64&repo=extras + enabled=1 + gpgcheck=1 + gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-7 + ``` + +7. Add NGINX Plus and NGINX App Protect DoS repository: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-7.4.repo + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-dos-7.repo + ``` + +8. In case of fresh installation, update the repository and install the most recent version of the NGINX Plus App Protect DoS package (which includes NGINX Plus): + + ```shell + sudo yum install app-protect-dos + ``` + + Alternatively, you can use the following command to list available versions: + + ```shell + sudo yum --showduplicates list app-protect-dos + ``` + + Then, install a specific version from the output of command above. For example: + + ```shell + sudo yum install app-protect-dos-27+2.4.0 + ``` + +9. In case of upgrading from previously installed NGINX Plus App Protect DoS package (which includes NGINX Plus): + + ```shell + sudo yum remove nginx-plus + sudo yum install app-protect-dos + sudo systemctl start nginx + ``` + + {{< note >}} Make sure to restore configuration from `/etc/nginx-plus-backup` back to `/etc/nginx-plus`.{{< /note >}} + +10. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: + + ```shell + sudo nginx -v + ``` + +11. Check the App Protect DoS binary version to ensure that you have the right version installed correctly: + + ```shell + sudo admd -v + ``` + +12. Load the NGINX App Protect DoS module on the main context in the `nginx.conf`: + + ```nginx + load_module modules/ngx_http_app_protect_dos_module.so; + ``` + +13. 
Enable NGINX App Protect DoS on an `http/server/location` context in the `nginx.conf` file: + + ```nginx + app_protect_dos_enable on; + app_protect_dos_name "App1"; + app_protect_dos_monitor uri=serv:80/; # Assuming server_name "serv" on port 80, with the root path "/" + ``` + +14. Configure the SELinux to allow NGINX App Protect DoS: + + a. Using the vi editor, create a file: + + ```shell + vi app-protect-dos.te + ``` + + b. Insert the following contents into the file created above: + + ```shell + module app-protect-dos 2.0; + require { + type unconfined_t; + type unconfined_service_t; + type httpd_t; + type tmpfs_t; + type initrc_t; + type initrc_state_t; + class capability sys_resource; + class shm { associate read unix_read unix_write write }; + class file { read write }; + } + allow httpd_t initrc_state_t:file { read write }; + allow httpd_t self:capability sys_resource; + allow httpd_t tmpfs_t:file { read write }; + allow httpd_t unconfined_service_t:shm { associate read unix_read unix_write write }; + allow httpd_t unconfined_t:shm { associate read write unix_read unix_write }; + allow httpd_t initrc_t:shm { associate read unix_read unix_write write }; + ``` + + c. Run the following chain of commands: + + ```shell + sudo checkmodule -M -m -o app-protect-dos.mod app-protect-dos.te && \ + sudo semodule_package -o app-protect-dos.pp -m app-protect-dos.mod && \ + sudo semodule -i app-protect-dos.pp; + ``` + + If you encounter any issues, refer to the [Troubleshooting Guide]({{< relref "/nap-dos/troubleshooting-guide/how-to-troubleshoot.md" >}}). + + {{< note >}}Additional SELinux configuration may be required to allow NGINX Plus to listen on specific network ports, connect to upstreams, and send syslog entries to remote systems. Refer to the practices outlined in the [Using NGINX and NGINX Plus with SELinux](https://www.nginx.com/blog/using-nginx-plus-with-selinux/) article for details.{{< /note >}} + +15. 
To enable the NGINX/App-Protect-DoS service to start at boot, run the command: + + ```shell + sudo systemctl enable nginx.service + ``` + +16. Start the NGINX service: + + ```shell + sudo systemctl start nginx + ``` + +## RHEL 8+ / Rocky Linux 8 Installation + +1. If you already have NGINX packages in your system, back up your configs and logs: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +2. Create the `/etc/ssl/nginx/` directory: + + ```shell + sudo mkdir -p /etc/ssl/nginx + ``` + +3. Log in to the NGINX [Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +4. Copy the above two files to the CentOS server’s `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. + +5. Install prerequisite packages: + + ```shell + sudo dnf install ca-certificates wget + +6. Enable Yum repositories to pull NGINX App Protect DoS dependencies: + + If you have a RHEL subscription: + + ```shell + sudo subscription-manager repos --enable=rhel-8-for-x86_64-baseos-rpms + sudo subscription-manager repos --enable=rhel-8-for-x86_64-appstream-rpms + sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + ``` + +7. Add NGINX Plus and NGINX App Protect DoS repository: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-8.repo + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-dos-8.repo + ``` + +8. 
In case of fresh installation, update the repository and install the most recent version of the NGINX Plus App Protect DoS package (which includes NGINX Plus): + + ```shell + sudo dnf install app-protect-dos + ``` + + For L4 accelerated mitigation feature (RHEL 8.6+): + + ```shell + sudo dnf install app-protect-dos-ebpf-manager + ``` + + {{< note >}} + L4 accelerated mitigation feature (RHEL 8.6+): + - `app-protect-dos-ebpf-manager` run with root privileges. + {{< /note >}} + + Alternatively, you can use the following command to list available versions: + + ```shell + sudo dnf --showduplicates list app-protect-dos + ``` + + Then, install a specific version from the output of command above. For example: + + ```shell + sudo dnf install app-protect-dos-27+2.4.0 + ``` + +9. In case of upgrading from previously installed NGINX Plus App Protect DoS package (which includes NGINX Plus): + + ```shell + sudo dnf remove nginx-plus + sudo dnf install app-protect-dos + sudo systemctl start nginx + ``` + + {{< note >}} Make sure to restore configuration from `/etc/nginx-plus-backup` back to `/etc/nginx-plus`.{{< /note >}} + +10. Confirm the NGINX binary version to make sure that you have NGINX Plus installed correctly: + + ```shell + sudo nginx -v + ``` + +11. Check the App Protect DoS binary version to ensure that you have the right version installed correctly: + + ```shell + sudo admd -v + ``` + +12. Load the NGINX App Protect DoS module on the main context in the `nginx.conf` file: + + ```nginx + load_module modules/ngx_http_app_protect_dos_module.so; + ``` + +13. Enable NGINX App Protect DoS in an `http/server/location` context in the `nginx.conf` file: + + ```nginx + app_protect_dos_enable on; + app_protect_dos_name "App1"; + app_protect_dos_monitor uri=serv:80/; # Assuming server_name "serv" on port 80, with the root path "/" + ``` + +14. 
Enable the L4 accelerated mitigation feature (RHEL 8.6+) in an `http` context in the `nginx.conf` file: + + ```nginx + app_protect_dos_accelerated_mitigation on; + ``` + +15. Configure the SELinux to allow App Protect DoS: + + a. Using the vi editor, create a file: + + ```shell + vi app-protect-dos.te + ``` + + b. Insert the following contents into the file that you have created: + + ```shell + module app-protect-dos 2.0; + require { + type unconfined_t; + type unconfined_service_t; + type httpd_t; + type tmpfs_t; + type initrc_t; + type initrc_state_t; + class capability sys_resource; + class shm { associate read unix_read unix_write write }; + class file { read write }; + } + allow httpd_t initrc_state_t:file { read write }; + allow httpd_t self:capability sys_resource; + allow httpd_t tmpfs_t:file { read write }; + allow httpd_t unconfined_service_t:shm { associate read unix_read unix_write write }; + allow httpd_t unconfined_t:shm { associate read write unix_read unix_write }; + allow httpd_t initrc_t:shm { associate read unix_read unix_write write }; + ``` + + c. Run the following chain of commands: + + ```shell + sudo checkmodule -M -m -o app-protect-dos.mod app-protect-dos.te && \ + sudo semodule_package -o app-protect-dos.pp -m app-protect-dos.mod && \ + sudo semodule -i app-protect-dos.pp; + ``` + + For L4 accelerated mitigation feature: + + a. Using the vi editor, create a file: + + ```shell + vi app-protect-dos-ebpf-manager.te + ``` + + b. Insert the following contents into the file you have created: + + ```shell + module app-protect-dos-ebpf-manager 1.0; + require { + type root_t; + type httpd_t; + type unconfined_service_t; + class sock_file write; + class unix_stream_socket connectto; + class shm { unix_read unix_write }; + } + allow httpd_t root_t:sock_file write; + allow httpd_t unconfined_service_t:shm { unix_read unix_write }; + allow httpd_t unconfined_service_t:unix_stream_socket connectto; + ``` + + c. 
Run the following chain of commands:
+
+    ```shell
+    sudo checkmodule -M -m -o app-protect-dos-ebpf-manager.mod app-protect-dos-ebpf-manager.te && \
+    sudo semodule_package -o app-protect-dos-ebpf-manager.pp -m app-protect-dos-ebpf-manager.mod && \
+    sudo semodule -i app-protect-dos-ebpf-manager.pp;
+    ```
+
+    If you encounter any issues, refer to the [Troubleshooting Guide]({{< relref "/nap-dos/troubleshooting-guide/how-to-troubleshoot.md" >}}).
+
+    {{< note >}}Additional SELinux configuration may be required to allow NGINX Plus to listen on specific network ports, connect to upstreams, and send syslog entries to remote systems. Refer to the practices outlined in the [Using NGINX and NGINX Plus with SELinux](https://www.nginx.com/blog/using-nginx-plus-with-selinux/) article for details.{{< /note >}}
+
+16. To enable the NGINX/App-Protect-DoS service to start at boot, run the command:
+
+    ```shell
+    sudo systemctl enable nginx.service
+    ```
+
+17. Start the NGINX service:
+
+    ```shell
+    sudo systemctl start nginx
+    ```
+
+18. L4 mitigation
+
+    To enable the `app-protect-dos-ebpf-manager` service to start at boot, run the command:
+    ```shell
+    sudo systemctl enable app-protect-dos-ebpf-manager.service
+    ```
+    Start the `app-protect-dos-ebpf-manager` service:
+    ```shell
+    sudo systemctl start app-protect-dos-ebpf-manager
+    ```
+
+## RHEL 9+ Installation
+
+1. If you already have NGINX packages on your system, back up your configs and logs:
+
+    ```shell
+    sudo cp -a /etc/nginx /etc/nginx-plus-backup
+    sudo cp -a /var/log/nginx /var/log/nginx-plus-backup
+    ```
+
+2. Create the `/etc/ssl/nginx/` directory:
+
+    ```shell
+    sudo mkdir -p /etc/ssl/nginx
+    ```
+
+3. Log in to the NGINX [Customer Portal](https://my.f5.com) and download the following two files:
+    - nginx-repo.key
+    - nginx-repo.crt
+
+4. Copy the downloaded files to the CentOS server’s `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task.
+
+5. 
Install prerequisite packages:
+
+    ```shell
+    sudo dnf install ca-certificates wget
+    ```
+
+6. Enable the yum repositories to pull NGINX App Protect DoS dependencies:
+
+    If you have a RHEL subscription:
+
+    ```shell
+    sudo subscription-manager repos --enable=rhel-9-for-x86_64-baseos-rpms
+    sudo subscription-manager repos --enable=rhel-9-for-x86_64-appstream-rpms
+    sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm
+    ```
+
+7. Add the NGINX Plus and NGINX App Protect DoS repositories:
+
+    ```shell
+    sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/plus-9.repo
+    sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-dos-9.repo
+    ```
+
+8. If you are performing a fresh installation, update the repository and install the most recent version of the NGINX Plus App Protect DoS package (which includes NGINX Plus):
+
+    ```shell
+    sudo dnf install app-protect-dos
+    ```
+
+    For L4 accelerated mitigation feature (RHEL 9):
+
+    ```shell
+    sudo dnf install app-protect-dos-ebpf-manager
+    ```
+
+    {{< note >}}
+    L4 accelerated mitigation feature (RHEL 9):
+    - `app-protect-dos-ebpf-manager` runs with root privileges.
+    {{< /note >}}
+
+    Alternatively, you can use the following command to list available versions:
+
+    ```shell
+    sudo dnf --showduplicates list app-protect-dos
+    ```
+
+    Then, install a specific version from the output of command above. For example:
+
+    ```shell
+    sudo dnf install app-protect-dos-32+4.4.0
+    ```
+
+9. If you are upgrading from a previously installed NGINX Plus App Protect DoS package (which includes NGINX Plus):
+
+    ```shell
+    sudo dnf remove nginx-plus
+    sudo dnf install app-protect-dos
+    sudo systemctl start nginx
+    ```
+
+    {{< note >}} Make sure to restore configuration from `/etc/nginx-plus-backup` back to `/etc/nginx-plus`.{{< /note >}}
+
+10. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly:
+
+    ```shell
+    sudo nginx -v
+    ```
+
+11. 
Check the App Protect DoS binary version to ensure that you have the right version installed correctly: + + ```shell + sudo admd -v + ``` + +12. Load the NGINX App Protect DoS module on the main context in the `nginx.conf`: + + ```nginx + load_module modules/ngx_http_app_protect_dos_module.so; + ``` + +13. Enable NGINX App Protect DoS on an `http/server/location` context in the `nginx.conf` file: + + ```nginx + app_protect_dos_enable on; + app_protect_dos_name "App1"; + app_protect_dos_monitor uri=serv:80/; # Assuming server_name "serv" on port 80, with the root path "/" + ``` + +14. Enable the L4 accelerated mitigation feature (RHEL 8.6+) in the `http` context of the `nginx.conf` file: + + ```nginx + app_protect_dos_accelerated_mitigation on; + ``` + +15. Configure the SELinux to allow App Protect DoS: + + a. Using the vi editor, create a file: + + ```shell + vi app-protect-dos.te + ``` + + b. Insert the following contents into the file created above: + + ```shell + module app-protect-dos 2.0; + require { + type unconfined_t; + type unconfined_service_t; + type httpd_t; + type tmpfs_t; + type initrc_t; + type initrc_state_t; + class capability sys_resource; + class shm { associate read unix_read unix_write write }; + class file { read write }; + } + allow httpd_t initrc_state_t:file { read write }; + allow httpd_t self:capability sys_resource; + allow httpd_t tmpfs_t:file { read write }; + allow httpd_t unconfined_service_t:shm { associate read unix_read unix_write write }; + allow httpd_t unconfined_t:shm { associate read write unix_read unix_write }; + allow httpd_t initrc_t:shm { associate read unix_read unix_write write }; + ``` + + c. Run the following chain of commands: + + ```shell + sudo checkmodule -M -m -o app-protect-dos.mod app-protect-dos.te && \ + sudo semodule_package -o app-protect-dos.pp -m app-protect-dos.mod && \ + sudo semodule -i app-protect-dos.pp; + ``` + + For L4 accelerated mitigation feature:

+    a. Using the vi editor, create a file:
+
+    ```shell
+    vi app-protect-dos-ebpf-manager.te
+    ```
+
+    b. Insert the following contents into the file created above:
+
+    ```shell
+    module app-protect-dos-ebpf-manager 1.0;
+    require {
+        type root_t;
+        type httpd_t;
+        type unconfined_service_t;
+        class sock_file write;
+        class unix_stream_socket connectto;
+        class shm { unix_read unix_write };
+    }
+    allow httpd_t root_t:sock_file write;
+    allow httpd_t unconfined_service_t:shm { unix_read unix_write };
+    allow httpd_t unconfined_service_t:unix_stream_socket connectto;
+    ```
+
+    c. Run the following chain of commands:
+
+    ```shell
+    sudo checkmodule -M -m -o app-protect-dos-ebpf-manager.mod app-protect-dos-ebpf-manager.te && \
+    sudo semodule_package -o app-protect-dos-ebpf-manager.pp -m app-protect-dos-ebpf-manager.mod && \
+    sudo semodule -i app-protect-dos-ebpf-manager.pp;
+    ```
+
+    If you encounter any issues, refer to the [Troubleshooting Guide]({{< relref "/nap-dos/troubleshooting-guide/how-to-troubleshoot.md" >}}).
+
+    {{< note >}}Additional SELinux configuration may be required to allow NGINX Plus to listen on specific network ports, connect to upstreams, and send syslog entries to remote systems. Refer to the practices outlined in the [Using NGINX and NGINX Plus with SELinux](https://www.nginx.com/blog/using-nginx-plus-with-selinux/) article for details.{{< /note >}}
+
+16. To enable the NGINX/App-Protect-DoS service to start at boot, run the command:
+
+    ```shell
+    sudo systemctl enable nginx.service
+    ```
+
+17. Start the NGINX service:
+
+    ```shell
+    sudo systemctl start nginx
+    ```
+
+18. L4 mitigation
+
+    To enable the `app-protect-dos-ebpf-manager` service to start at boot, run the command:
+    ```shell
+    sudo systemctl enable app-protect-dos-ebpf-manager.service
+    ```
+    Start the `app-protect-dos-ebpf-manager` service:
+    ```shell
+    sudo systemctl start app-protect-dos-ebpf-manager
+    ```
+
+
+## Debian / Ubuntu Installation
+
+1. 
If you already have NGINX packages in your system, back up your configs and logs: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +2. Create the `/etc/ssl/nginx/` directory: + + ```shell + sudo mkdir -p /etc/ssl/nginx + ``` + +3. Log in to the NGINX [Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +4. Copy the above two files to the Debian server’s `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. + +5. Install apt utils: + + For Debian: + + ```shell + sudo apt-get install apt-transport-https lsb-release ca-certificates wget gnupg2 debian-archive-keyring + ``` + + For Ubuntu: + + ```shell + sudo apt-get install apt-transport-https lsb-release ca-certificates wget gnupg2 ubuntu-keyring + ``` + + {{< note >}}In case the apt installation or database update fails due to release info change, run the below command before you install.{{< /note >}} + + ```shell + sudo apt-get update --allow-releaseinfo-change + ``` + +6. Download and add the NGINX signing key: + + ```shell + sudo wget -qO - https://cs.nginx.com/static/keys/nginx_signing.key | sudo gpg --dearmor | sudo tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null + ``` + +7. 
Add NGINX Plus and NGINX App Protect DoS repository: + + For Debian: + + ```shell + printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] https://pkgs.nginx.com/plus/debian `lsb_release -cs` nginx-plus\n" | sudo tee /etc/apt/sources.list.d/nginx-plus.list + printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] https://pkgs.nginx.com/app-protect-dos/debian `lsb_release -cs` nginx-plus\n" | sudo tee /etc/apt/sources.list.d/nginx-app-protect-dos.list + ``` + + For Ubuntu: + + ```shell + printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] https://pkgs.nginx.com/plus/ubuntu `lsb_release -cs` nginx-plus\n" | sudo tee /etc/apt/sources.list.d/nginx-plus.list + printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] https://pkgs.nginx.com/app-protect-dos/ubuntu `lsb_release -cs` nginx-plus\n" | sudo tee /etc/apt/sources.list.d/nginx-app-protect-dos.list + ``` + +8. Download the apt configuration to `/etc/apt/apt.conf.d`: + + ```shell + sudo wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx + ``` + +9. In case of fresh Installation, update the repository and install the most recent version of the NGINX Plus App Protect DoS package (which includes NGINX Plus): + + ```shell + sudo apt-get update + sudo apt-get install app-protect-dos + ``` + + For L4 accelerated mitigation feature (Debian 11 / Debian 12 / Ubuntu 20.04 / Ubuntu 22.04 / Ubuntu 24.04): + + ```shell + sudo apt-get install app-protect-dos-ebpf-manager + ``` + + {{< note >}} + L4 accelerated mitigation feature (Debian 11 / Debian 12 / Ubuntu 20.04 / Ubuntu 22.04 / Ubuntu 24.04): + - `app-protect-dos-ebpf-manager` run with root privileges. + {{< /note >}} + + Alternatively, to install a specific version, use the following commands to update and list available versions: + + ```shell + sudo apt-get update + sudo apt-cache policy app-protect-dos + ``` + + Finally, install a specific version from the output of command above. 

+
+    For example for Debian 10:
+
+    ```shell
+    sudo apt-get install app-protect-dos=27+2.4.0-1~buster nginx-plus-module-appprotectdos=27+2.4.0-1~buster
+    ```
+
+    For example for Debian 11:
+
+    ```shell
+    sudo apt-get install app-protect-dos=33+4.5.0-1~bullseye nginx-plus-module-appprotectdos=33+4.5.0-1~bullseye
+    ```
+
+    For example, for Debian 12:
+
+    ```shell
+    sudo apt-get install app-protect-dos=33+4.5.0-1~bookworm nginx-plus-module-appprotectdos=33+4.5.0-1~bookworm
+    ```
+
+    For example for Ubuntu 18.04:
+
+    ```shell
+    sudo apt-get install app-protect-dos=27+2.4.0-1~bionic nginx-plus-module-appprotectdos=27+2.4.0-1~bionic
+    ```
+
+    For example for Ubuntu 20.04:
+
+    ```shell
+    sudo apt-get install app-protect-dos=33+4.5.0-1~focal nginx-plus-module-appprotectdos=33+4.5.0-1~focal
+    ```
+
+    For example for Ubuntu 22.04:
+
+    ```shell
+    sudo apt-get install app-protect-dos=33+4.5.0-1~jammy nginx-plus-module-appprotectdos=33+4.5.0-1~jammy
+    ```
+
+    For example for Ubuntu 24.04:
+
+    ```shell
+    sudo apt-get install app-protect-dos=33+4.5.0-1~noble nginx-plus-module-appprotectdos=33+4.5.0-1~noble
+    ```
+
+10. In the case of upgrading from a previously installed NGINX Plus App Protect DoS package (which includes NGINX Plus):
+
+    ```shell
+    sudo apt-get update
+    sudo apt-get remove nginx-plus
+    sudo apt-get install app-protect-dos
+    sudo service nginx start
+    ```
+
+11. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly:
+
+    ```shell
+    sudo nginx -v
+    ```
+
+12. Check the App Protect DoS binary version to ensure that you have the right version installed correctly:
+
+    ```shell
+    sudo admd -v
+    ```
+
+13. Load the NGINX App Protect DoS module on the main context in the `nginx.conf` file:
+
+    ```nginx
+    load_module modules/ngx_http_app_protect_dos_module.so;
+    ```
+
+14. 
Enable NGINX App Protect DoS on an `http/server/location` context in the `nginx.conf` via:
+
+    ```nginx
+    app_protect_dos_enable on;
+    app_protect_dos_name "App1";
+    app_protect_dos_monitor uri=serv:80/; # Assuming server_name "serv" on port 80, with the root path "/"
+    ```
+
+15. Enable the L4 accelerated mitigation feature (Debian 11 / Debian 12 / Ubuntu 20.04 / Ubuntu 22.04) on the `http` context of the `nginx.conf` file:
+
+    ```nginx
+    app_protect_dos_accelerated_mitigation on;
+    ```
+
+16. Start the NGINX service:
+
+    ```shell
+    sudo service nginx start
+    ```
+17. Start the L4 service:
+    ```shell
+    sudo service app-protect-dos-ebpf-manager start
+    ```
+
+## Alpine 3.15.x / 3.17.x / 3.19.x Installation
+
+1. If you already have NGINX packages in your system, back up your configs and logs:
+
+    ```shell
+    sudo cp -a /etc/nginx /etc/nginx-plus-backup
+    sudo cp -a /var/log/nginx /var/log/nginx-plus-backup
+    ```
+
+2. Log in to the NGINX [Customer Portal](https://my.f5.com) and download the following two files:
+
+    ```shell
+    nginx-repo.key
+    nginx-repo.crt
+    ```
+
+3. Upload `nginx-repo.key` to `/etc/apk/cert.key` and `nginx-repo.crt` to `/etc/apk/cert.pem`.
+    Make sure that files do not contain other certificates and keys, as Alpine Linux does not support mixing client certificates for different repositories.
+
+4. Add the NGINX public signing key to the directory `/etc/apk/keys`:
+
+    ```shell
+    sudo wget -O /etc/apk/keys/nginx_signing.rsa.pub https://cs.nginx.com/static/keys/nginx_signing.rsa.pub
+    ```
+
+5. Remove any previously configured NGINX Plus repository:
+
+    ```shell
+    sudo sed -i "/plus-pkgs.nginx.com/d" /etc/apk/repositories
+    ```
+
+6. Add NGINX Plus repository to `/etc/apk/repositories` file:
+
+    ```shell
+    printf "https://pkgs.nginx.com/plus/alpine/v`egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release`/main\n" | sudo tee -a /etc/apk/repositories
+    ```
+
+7. 
Add NGINX App Protect DoS repository to `/etc/apk/repositories` file:
+
+    ```shell
+    printf "https://pkgs.nginx.com/app-protect-dos/alpine/v`egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release`/main\n" | sudo tee -a /etc/apk/repositories
+    ```
+
+8. It is recommended to remove all community-supported NGINX packages. Note that all NGINX modules will be removed as well.
+
+    ```shell
+    sudo apk del -r app-protect-dos
+    sudo apk del -r nginx
+    ```
+
+9. Update the repository and install the most recent version of the NGINX Plus and NGINX App Protect DoS:
+
+    ```shell
+    sudo apk update
+    sudo apk add nginx-plus app-protect-dos
+    ```
+
+    For L4 accelerated mitigation feature:
+
+    ```shell
+    sudo apk add app-protect-dos-ebpf-manager
+    ```
+
+    {{< note >}}
+    L4 accelerated mitigation feature:
+    - `app-protect-dos-ebpf-manager` runs with root privileges.
+    {{< /note >}}
+
+    Alternatively, to install a specific version, use the following commands to update and list available versions:
+
+    ```shell
+    sudo apk update
+    sudo apk info app-protect-dos
+    ```
+
+    Finally, install a specific version from the output of command above. For example:
+
+    ```shell
+    sudo apk add nginx-plus app-protect-dos=33+4.5.0-r1
+    ```
+
+10. In case of upgrading from previously installed NGINX Plus App Protect DoS package (which includes NGINX Plus):
+
+    ```shell
+    sudo apk update
+    sudo apk del -r app-protect-dos
+    sudo apk del -r nginx-plus
+    sudo apk add nginx-plus app-protect-dos
+    rc-service nginx-app-protect-dos start
+    ```
+
+11. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly:
+
+    ```shell
+    sudo nginx -v
+    ```
+
+12. Check the App Protect DoS binary version to ensure that you have the right version installed correctly:
+
+    ```shell
+    sudo admd -v
+    ```
+
+13. Load the NGINX App Protect DoS module on the main context in the `nginx.conf` file:
+
+    ```nginx
+    load_module modules/ngx_http_app_protect_dos_module.so;
+    ```
+
+14. 
Enable NGINX App Protect DoS on an `http/server/location` context in the `nginx.conf` via: + + ```nginx + app_protect_dos_enable on; + app_protect_dos_name "App1"; + app_protect_dos_monitor uri=serv:80/; # Assuming server_name "serv" on port 80, with the root path "/" + ``` + +15. Enable the L4 accelerated mitigation feature on the `http` context of the `nginx.conf` file: + + ```nginx + app_protect_dos_accelerated_mitigation on; + ``` + +16. Start the NGINX service: + + ```shell + rc-service nginx-app-protect-dos start + ``` + +17. Start the L4 service: + ```shell + rc-service app-protect-dos-ebpf-manager start + ``` + +## Docker Deployment + +### Docker Deployment Instructions + +You need root permissions to execute the following steps. + +1. Create a Dockerfile (see examples below) which copies the following files into the docker image: + + - `nginx-repo.crt`: Certificate for NGINX repository access + - `nginx-repo.key`: Private key for NGINX repository access + - `nginx.conf`: User defined `nginx.conf` with `app-protect-dos` enabled + - `entrypoint.sh`: Docker startup script which spins up all App Protect DoS processes, must have executable permissions + +2. Log in to NGINX Plus Customer Portal and download your `nginx-repo.crt` and `nginx-repo.key` files. + +3. Copy the files to the directory where the Dockerfile is located. + +4. Add NGINX App Protect DoS to your `nginx.conf`. The configuration below is an example for an `http` and `grpc+tls` servers which has NGINX App Protect DoS enabled. Note that every NGINX App Protect DoS related directive starts with `app_protect_dos_`. 
+ + `nginx.conf` + + ```nginx + user nginx; + worker_processes auto; + error_log /var/log/nginx/error.log error; + worker_rlimit_nofile 65535; + working_directory /tmp/cores; + + load_module modules/ngx_http_app_protect_dos_module.so; # NGINX App Protect DoS module + + events { + worker_connections 65535; + } + + http { + include /etc/nginx/mime.types; + + log_format log_napd ', vs_name_al=$app_protect_dos_vs_name, ip=$remote_addr, tls_fp=$app_protect_dos_tls_fp, ' + 'outcome=$app_protect_dos_outcome, reason=$app_protect_dos_outcome_reason, ' + 'ip_tls=$remote_addr:$app_protect_dos_tls_fp, '; + + app_protect_dos_security_log_enable on; # Enable NGINX App Protect DoS's security logger + app_protect_dos_security_log "/etc/app_protect_dos/log-default.json" /var/log/adm/logger.log; # Security logger outputs to a file + # app_protect_dos_security_log "/etc/app_protect_dos/log-default.json" syslog:server=1.2.3.4:5261; # Security logger outputs to a syslog destination + + # HTTP/1 server + server { + default_type application/octet-stream; + listen 80 reuseport; + server_name serv80; + + set $loggable '0'; + access_log /var/log/nginx/access.log log_napd if=$loggable; # Access log with rate limiting and additional information + # access_log syslog:server=1.1.1.1:5561 log_napd if=$loggable; + + app_protect_dos_policy_file "/etc/app_protect_dos/BADOSDefaultPolicy.json"; # Policy configuration for NGINX App Protect DoS + + location / { + app_protect_dos_enable on; # Enable NGINX App Protect DoS in this block + app_protect_dos_name "App80"; # PO name + app_protect_dos_monitor uri=http://serv80/; # Health monitoring + proxy_pass http://1.2.3.4:80; + } + } + + # gRPC server with ssl + server { + default_type application/grpc; + listen 443 http2 ssl reuseport; + server_name serv_grpc; + + # TLS config + ssl_certificate /etc/ssl/certs/grpc.example.com.crt; + ssl_certificate_key /etc/ssl/private/grpc.example.com.key; + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 5m; + 
ssl_ciphers HIGH:!aNULL:!MD5; + ssl_protocols TLSv1.2 TLSv1.3; + + set $loggable '0'; + access_log /var/log/nginx/access.log log_napd if=$loggable; + #access_log syslog:server=1.1.1.1:5561 log_napd if=$loggable; + + location / { + app_protect_dos_enable on; + app_protect_dos_name "AppGRPC"; + app_protect_dos_monitor uri=https://serv_grpc:443/service/method protocol=grpc; # mandatory for gRPC + grpc_pass grpc://1.2.3.4:1001; + } + } + + sendfile on; + tcp_nopush on; + keepalive_timeout 65; + } + ``` + + {{< important >}} + Make sure to replace upstream and proxy pass directives in this example with relevant application backend settings. + {{< /important >}} + +5. In the same directory create an `entrypoint.sh` file with executable permissions, with the following content: + + For CentOS 7 / UBI 7: + + ```shell + #!/usr/bin/env bash + + USER=nginx + LOGDIR=/var/log/adm + + # prepare environment + mkdir -p /var/run/adm /tmp/cores ${LOGDIR} + chmod 755 /var/run/adm /tmp/cores ${LOGDIR} + chown ${USER}:${USER} /var/run/adm /tmp/cores ${LOGDIR} + + LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/rpm/lib64 + export LD_LIBRARY_PATH + + # run processes + /bin/su -s /bin/bash -c "/usr/bin/adminstall > ${LOGDIR}/adminstall.log 2>&1" ${USER} + /usr/sbin/nginx -g 'daemon off;' & + /bin/su -s /bin/bash -c "/usr/bin/admd -d --log info > ${LOGDIR}/admd.log 2>&1 &" ${USER} + ``` + + For Alpine / Debian / Ubuntu / UBI 8/ UBI 9: + + ```shell + #!/usr/bin/env bash + + USER=nginx + LOGDIR=/var/log/adm + + # prepare environment + mkdir -p /var/run/adm /tmp/cores ${LOGDIR} + chmod 755 /var/run/adm /tmp/cores ${LOGDIR} + chown ${USER}:${USER} /var/run/adm /tmp/cores ${LOGDIR} + + # run processes + /bin/su -s /bin/bash -c "/usr/bin/adminstall > ${LOGDIR}/adminstall.log 2>&1" ${USER} + /usr/sbin/nginx -g 'daemon off;' & + /bin/su -s /bin/bash -c "/usr/bin/admd -d --log info > ${LOGDIR}/admd.log 2>&1 &" ${USER} + ``` + +6. 
Create a Docker image: + + ```shell + docker build --no-cache -t app-protect-dos . + ``` + + The `--no-cache` option tells Docker to build the image from scratch and ensures the installation of the latest version of NGINX Plus and NGINX App Protect DoS. If the Dockerfile was previously used to build an image without the `--no-cache` option, the new image uses versions from the previously built image from the Docker cache. + +7. Verify that the `app-protect-dos` image was created successfully with the docker images command: + + ```shell + docker images app-protect-dos + ``` + +8. Create a container based on this image, for example, `my-app-protect-dos` container: + + ```shell + docker run --name my-app-protect-dos -p 80:80 -d app-protect-dos + ``` + +9. Verify that the `my-app-protect-dos` container is up and running with the `docker ps` command: + + ```shell + docker ps + ``` + +10. L4 Accelerated Mitigation Deployment Options:
      + There are three different ways to deploy the L4 accelerated mitigation feature:
      + 1. Deploy in a Dedicated Container.
      + Create a shared folder on the host: + ```shell + mkdir /shared + ``` + This folder will be used to share data between containers. + Modify the `entrypoint.sh` to run the L4 mitigation: + + ```shell + # run processes + /usr/bin/ebpf_manager_dos + ``` + + Create and run the L4 container: + ```shell + docker run --privileged --network host --mount type=bind,source=/sys/fs/bpf,target=/sys/fs/bpf -v /shared:/shared --name my-app-protect-dos-ebpf-manager -d app-protect-dos-ebpf-manager + ``` + + Create and run the main `app-protect-dos` container: + ```shell + docker run --name my-app-protect-dos -v /shared:/shared -p 80:80 -d app-protect-dos + ``` + 2. Deploy Directly on the Host.
      + To run L4 mitigation directly on the host:
      + 1. Install the L4 mitigation on the host, as described in the OS-specific instructions. + 2. Run the app-protect-dos container: + ```shell + docker run --name my-app-protect-dos -v /shared:/shared -p 80:80 -d app-protect-dos + ``` + 3. Run L4 Mitigation Inside the Same Container as `app-protect-dos`.
      + To run both L4 mitigation and the main application within the same container:
      + 1. Modify the `entrypoint.sh`: + ```shell + ... + # run processes + /usr/bin/ebpf_manager_dos & + ... + ``` + 2. run the container: + ```shell + docker run --name my-app-protect-dos -p 80:80 -d app-protect-dos + ``` + + {{< note >}} + L4 accelerated mitigation feature: + - `app-protect-dos-ebpf-manager` need to run with root privileges. + {{< /note >}} + +### CentOS 7.4 Docker Deployment Example + +```dockerfile +# For CentOS 7: +FROM centos:7.4.1708 + +# Download certificate and key from the customer portal (https://my.f5.com) +# and copy to the build context: +COPY nginx-repo.crt nginx-repo.key /etc/ssl/nginx/ + +# Install prerequisite packages: +RUN yum -y install wget ca-certificates epel-release + +# Add NGINX Plus and NGINX App Protect DoS repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-7.4.repo +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-dos-7.repo + +# Install NGINX App Protect DoS: +RUN yum -y install app-protect-dos \ + && yum clean all \ + && rm -rf /var/cache/yum \ + && rm -rf /etc/ssl/nginx + +# Copy configuration files: +COPY nginx.conf /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD /root/entrypoint.sh && tail -f /dev/null +``` + +### UBI7 Docker Deployment Example + +```Dockerfile +FROM registry.access.redhat.com/ubi7:ubi + +# Download certificate and key from the customer portal (https://my.f5.com) +# and copy to the build context: +COPY nginx-repo.crt nginx-repo.key /etc/ssl/nginx/ + +# Setup the Redhat subscription +RUN subscription-manager register --force --org=${RHEL_ORG} --activationkey=${RHEL_ACTIVATION_KEY} +RUN subscription-manager refresh +RUN subscription-manager attach --auto + +# Install prerequisite packages: +RUN yum -y install wget ca-certificates + +# Install dependencies +RUN subscription-manager repos --enable rhel-*-optional-rpms \ + --enable rhel-*-extras-rpms \ + --enable rhel-ha-for-rhel-*-server-rpms +RUN yum -y install 
https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + +# Add NGINX Plus and NGINX App Protect DoS repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-7.4.repo +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-dos-7.repo + +# Install NGINX App Protect DoS: +RUN yum -y install app-protect-dos \ + && yum clean all \ + && rm -rf /var/cache/yum \ + && rm -rf /etc/ssl/nginx + +# Copy configuration files: +COPY nginx.conf /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD /root/entrypoint.sh && tail -f /dev/null +``` + +### RHEL 8 / Rocky Linux 8 Docker Deployment Example + +```Dockerfile +# For UBI 8 +FROM registry.access.redhat.com/ubi8:ubi + +# Download certificate and key from the customer portal (https://my.f5.com) +# and copy to the build context: +COPY nginx-repo.crt nginx-repo.key /etc/ssl/nginx/ + +# Setup the Redhat subscription +RUN subscription-manager register --force --org=${RHEL_ORG} --activationkey=${RHEL_ACTIVATION_KEY} +RUN subscription-manager refresh +RUN subscription-manager attach --auto + +# Setup repos and Install dependencies +RUN subscription-manager repos --enable=rhel-8-for-x86_64-baseos-rpms +RUN subscription-manager repos --enable=rhel-8-for-x86_64-appstream-rpms +RUN dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + +# Install prerequisite packages: +RUN dnf -y install wget ca-certificates + +# Add NGINX Plus and NGINX App Protect DoS repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-8.4.repo +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-dos-8.repo + +# Install NGINX App Protect DoS: +RUN dnf -y install app-protect-dos \ + && dnf clean all \ + && rm -rf /var/cache/yum \ + && rm -rf /etc/ssl/nginx + +# Copy configuration files: +COPY nginx.conf /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD /root/entrypoint.sh && tail -f /dev/null +``` + +### RHEL 9 Docker 
Deployment Example + +```Dockerfile +# For RHEL ubi9: +FROM registry.access.redhat.com/ubi9/ubi + +# Download certificate and key from the customer portal (https://my.f5.com) +# and copy to the build context: +COPY nginx-repo.crt nginx-repo.key /etc/ssl/nginx/ + +# Setup the Redhat subscription +RUN subscription-manager register --force --org=${RHEL_ORG} --activationkey=${RHEL_ACTIVATION_KEY} +RUN subscription-manager refresh +RUN subscription-manager attach --auto + +# Setup repos and Install dependencies +RUN subscription-manager repos --enable=rhel-9-for-x86_64-baseos-rpms +RUN subscription-manager repos --enable=rhel-9-for-x86_64-appstream-rpms +RUN dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm + +# Install prerequisite packages: +RUN dnf -y install wget ca-certificates + +# Add NGINX Plus repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/plus-9.repo + +# Add NGINX App-protect & dependencies repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-dos-9.repo +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo \ + # You can use either of the dependencies or epel repo + # && rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm \ + && dnf clean all + +# Install NGINX App Protect DoS: +RUN dnf -y install app-protect-dos \ + && dnf clean all \ + && rm -rf /var/cache/yum \ + && rm -rf /etc/ssl/nginx + +# Copy configuration files: +COPY nginx.conf /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD /root/entrypoint.sh && tail -f /dev/null +``` + + +### Debian 10 (Buster) / Debian 11 (Bullseye) / Debian 12 (Bookworm) Docker Deployment Example + +```Dockerfile + +ARG OS_CODENAME +# Where OS_CODENAME can be: buster/bullseye/bookworm + +FROM debian:${OS_CODENAME} + +# Download certificate and key from the customer portal (https://my.f5.com) +# and copy to the build context: +COPY nginx-repo.crt nginx-repo.key /etc/ssl/nginx/ 
+ +# Install prerequisite packages: +RUN apt-get update && apt-get install -y apt-transport-https lsb-release ca-certificates wget gnupg2 debian-archive-keyring + +# Download and add the NGINX signing key: +RUN wget https://cs.nginx.com/static/keys/nginx_signing.key && apt-key add nginx_signing.key +RUN wget -qO - https://cs.nginx.com/static/keys/nginx_signing.key | gpg --dearmor | tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null + +# Add NGINX Plus and NGINX App Protect DoS repository: +RUN printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] https://pkgs.nginx.com/plus/debian `lsb_release -cs` nginx-plus\n" | tee /etc/apt/sources.list.d/nginx-plus.list +RUN printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] https://pkgs.nginx.com/app-protect-dos/debian `lsb_release -cs` nginx-plus\n" | tee /etc/apt/sources.list.d/nginx-app-protect-dos.list + +# Download the apt configuration to `/etc/apt/apt.conf.d`: +RUN wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx + +# Update the repository and install the most recent version of the NGINX App Protect package (which includes NGINX Plus): +RUN apt-get update && apt-get install -y app-protect-dos + +# Remove nginx repository key/cert from docker +RUN rm -rf /etc/ssl/nginx + +# Copy configuration files: +COPY nginx.conf /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD /root/entrypoint.sh && tail -f /dev/null +``` + +### Ubuntu 18.04 (Bionic) / 20.04 (Focal) / 22.04 (Jammy) / 24.04 (Noble) Docker Deployment Example + +```Dockerfile + +ARG OS_CODENAME +# Where OS_CODENAME can be: bionic/focal/jammy/noble + +FROM ubuntu:${OS_CODENAME} + +# Download certificate and key from the customer portal (https://my.f5.com) +# and copy to the build context: +COPY nginx-repo.crt nginx-repo.key /etc/ssl/nginx/ + +# Install prerequisite packages: +RUN apt-get update && apt-get install -y apt-transport-https lsb-release ca-certificates wget gnupg2 ubuntu-keyring + +# Download 
and add the NGINX signing key: +RUN wget -qO - https://cs.nginx.com/static/keys/nginx_signing.key | gpg --dearmor | tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null + +# Add NGINX Plus and NGINX App Protect DoS repository: +RUN printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] https://pkgs.nginx.com/plus/ubuntu `lsb_release -cs` nginx-plus\n" | tee /etc/apt/sources.list.d/nginx-plus.list +RUN printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] https://pkgs.nginx.com/app-protect-dos/ubuntu `lsb_release -cs` nginx-plus\n" | tee /etc/apt/sources.list.d/nginx-app-protect-dos.list + +# Download the apt configuration to `/etc/apt/apt.conf.d`: +RUN wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx + +# Update the repository and install the most recent version of the NGINX App Protect DoS package (which includes NGINX Plus): +RUN apt-get update && apt-get install -y app-protect-dos + +# Remove nginx repository key/cert from docker +RUN rm -rf /etc/ssl/nginx + +# Copy configuration files: +COPY nginx.conf /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD /root/entrypoint.sh && tail -f /dev/null +``` + +### Alpine Docker Deployment Example + +```Dockerfile +# For Alpine 3.15 / 3.17 / 3.19: +ARG OS_CODENAME +# Where OS_CODENAME can be: 3.15 / 3.17 / 3.19 +FROM alpine:${OS_CODENAME} + +# Download certificate and key from the customer portal (https://my.f5.com) +# and copy to the build context: +COPY nginx-repo.crt nginx-repo.key /etc/ssl/nginx/ + +# Download and add the NGINX signing key: +RUN wget -O /etc/apk/keys/nginx_signing.rsa.pub https://cs.nginx.com/static/keys/nginx_signing.rsa.pub + +# Add NGINX Plus repository: +RUN printf "https://pkgs.nginx.com/plus/alpine/v`egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release`/main\n" | tee -a /etc/apk/repositories + +# Add NGINX App Protect DoS repository: +RUN printf "https://pkgs.nginx.com/app-protect-dos/alpine/v`egrep -o '^[0-9]+\.[0-9]+' 
/etc/alpine-release`/main\n" | tee -a /etc/apk/repositories + +# Add prerequisite packages +RUN apk update && apk add bash + +# Update the repository and install the most recent version of the NGINX App Protect DoS package (which includes NGINX Plus): +RUN --mount=type=secret,id=nginx-crt,dst=/etc/apk/cert.pem,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/apk/cert.key,mode=0644 \ + apk update && apk add nginx-plus app-protect-dos + +# Copy configuration files: +COPY nginx.conf /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD ["sh", "/root/entrypoint.sh"] +``` + + +## Docker Deployment with NGINX App Protect + +### Docker Deployment Instructions + +You need root permissions to execute the following steps. + +1. Create a Dockerfile (see examples below) which copies the following files into the docker image: + + - `nginx-repo.crt`: Certificate for NGINX repository access + - `nginx-repo.key`: Private key for NGINX repository access + - `nginx.conf`: User defined `nginx.conf` with `app-protect-dos` enabled + - `entrypoint.sh`: Docker startup script which spins up all App Protect DoS processes, must have executable permissions + +2. Log in to NGINX Plus Customer Portal and download your `nginx-repo.crt` and `nginx-repo.key` files. + +3. Copy the files to the directory where the Dockerfile is located. + +4. 
In the same directory create the `nginx.conf` file with the following contents: + + ```nginx + user nginx; + worker_processes auto; + error_log /var/log/nginx/error.log error; + worker_rlimit_nofile 65535; + working_directory /tmp/cores; + + load_module modules/ngx_http_app_protect_module.so; + load_module modules/ngx_http_app_protect_dos_module.so; + + events { + worker_connections 65535; + + } + + http { + include /etc/nginx/mime.types; + + log_format log_napd ', vs_name_al=$app_protect_dos_vs_name, ip=$remote_addr, tls_fp=$app_protect_dos_tls_fp, ' + 'outcome=$app_protect_dos_outcome, reason=$app_protect_dos_outcome_reason, ' + 'ip_tls=$remote_addr:$app_protect_dos_tls_fp, '; + + app_protect_dos_security_log_enable on; + app_protect_dos_security_log "/etc/app_protect_dos/log-default.json" /var/log/adm/logger.log; + #app_protect_dos_security_log "/etc/app_protect_dos/log-default.json" syslog:server=1.2.3.4:5261; + + # HTTP/1 server + server { + default_type application/octet-stream; + listen 80 reuseport; + server_name serv80; + proxy_http_version 1.1; + + app_protect_policy_file "/etc/app_protect/conf/NginxDefaultPolicy.json"; + app_protect_security_log_enable on; + + set $loggable '0'; + access_log /var/log/nginx/access.log log_napd if=$loggable; + #access_log syslog:server=1.1.1.1:5561 log_napd if=$loggable; + app_protect_dos_policy_file "/etc/app_protect_dos/BADOSDefaultPolicy.json"; + + location / { + app_protect_dos_enable on; + app_protect_dos_name "App80"; + app_protect_dos_monitor uri=http://serv80/; + + proxy_pass http://1.2.3.4:80; + } + } + + # gRPC server with ssl + server { + default_type application/grpc; + listen 443 http2 ssl reuseport; + server_name serv_grpc; + + # TLS config + ssl_certificate /etc/ssl/certs/grpc.example.com.crt; + ssl_certificate_key /etc/ssl/private/grpc.example.com.key; + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 5m; + ssl_ciphers HIGH:!aNULL:!MD5; + ssl_protocols TLSv1.2 TLSv1.3; + + set $loggable '0'; + 
access_log /var/log/nginx/access.log log_napd if=$loggable; + #access_log syslog:server=1.1.1.1:5561 log_napd if=$loggable; + + location / { + app_protect_dos_enable on; + app_protect_dos_name "AppGRPC"; + app_protect_dos_monitor uri=https://serv_grpc:443/service/method protocol=grpc; # mandatory for gRPC + grpc_pass grpc://1.2.3.4:1001; + } + } + + sendfile on; + tcp_nopush on; + keepalive_timeout 65; + } + ``` + +{{< important >}} +Make sure to replace upstream and proxy pass directives in this example with relevant application backend settings. +{{< /important >}} + +5. For the L4 accelerated mitigation feature:
      + The following line in the `nginx.conf` file needs to be modified:
      + Change: + ```nginx + user nginx; + ``` + To: + ```nginx + user root; + ``` + +5. In the same directory create an `entrypoint.sh` file with executable permissions, with the following content: + + For CentOS 7 / UBI 7: + + ```shell + #!/usr/bin/env bash + USER=nginx + LOGDIR=/var/log/adm + + # prepare environment + mkdir -p /var/run/adm /tmp/cores ${LOGDIR} + chmod 755 /var/run/adm /tmp/cores ${LOGDIR} + chown ${USER}:${USER} /var/run/adm /tmp/cores ${LOGDIR} + + LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/rpm/lib64 + export LD_LIBRARY_PATH + + # run processes + /bin/su -s /bin/bash -c "/usr/bin/adminstall > ${LOGDIR}/adminstall.log 2>&1" ${USER} + /bin/su -s /bin/bash -c '/opt/app_protect/bin/bd_agent &' ${USER} + /bin/su -s /bin/bash -c "/usr/share/ts/bin/bd-socket-plugin tmm_count 4 proc_cpuinfo_cpu_mhz 2000000 total_xml_memory 307200000 total_umu_max_size 3129344 sys_max_account_id 1024 no_static_config 2>&1 > /var/log/app_protect/bd-socket-plugin.log &" ${USER} + /usr/sbin/nginx -g 'daemon off;' & + /bin/su -s /bin/bash -c "/usr/bin/admd -d --log info > ${LOGDIR}/admd.log 2>&1 &" ${USER} + ``` + + For Alpine / Debian / Ubuntu / UBI 8/ UBI 9: + + ```shell + #!/usr/bin/env bash + USER=nginx + LOGDIR=/var/log/adm + + # prepare environment + mkdir -p /var/run/adm /tmp/cores ${LOGDIR} + chmod 755 /var/run/adm /tmp/cores ${LOGDIR} + chown ${USER}:${USER} /var/run/adm /tmp/cores ${LOGDIR} + + # run processes + /bin/su -s /bin/bash -c "/usr/bin/adminstall > ${LOGDIR}/adminstall.log 2>&1" ${USER} + /bin/su -s /bin/bash -c '/opt/app_protect/bin/bd_agent &' ${USER} + /bin/su -s /bin/bash -c "/usr/share/ts/bin/bd-socket-plugin tmm_count 4 proc_cpuinfo_cpu_mhz 2000000 total_xml_memory 307200000 total_umu_max_size 3129344 sys_max_account_id 1024 no_static_config 2>&1 > /var/log/app_protect/bd-socket-plugin.log &" ${USER} + /usr/sbin/nginx -g 'daemon off;' & + /bin/su -s /bin/bash -c "/usr/bin/admd -d --log info > ${LOGDIR}/admd.log 2>&1 &" ${USER} + ``` + +6. 
Create a Docker image: + + For CentOS: + + ```shell + docker build --no-cache -t app-protect-dos . + ``` + + For RHEL: + + ```shell + docker build --build-arg RHEL_ORGANIZATION=${RHEL_ORGANIZATION} --build-arg RHEL_ACTIVATION_KEY=${RHEL_ACTIVATION_KEY} --no-cache -t app-protect-dos . + ``` + + The `--no-cache` option tells Docker to build the image from scratch and ensures the installation of the latest version of NGINX Plus and NGINX App Protect DoS. If the Dockerfile was previously used to build an image without the `--no-cache` option, the new image uses versions from the previously built image from the Docker cache. + +7. Verify that the `app-protect-dos` image was created successfully with the docker images command: + + ```shell + docker images app-protect-dos + ``` + +8. Create a container based on this image, for example, `my-app-protect-dos` container: + + ```shell + docker run --name my-app-protect-dos -p 80:80 -d app-protect-dos + ``` + +9. Verify that the `my-app-protect-dos` container is up and running with the `docker ps` command: + + ```shell + docker ps + ``` + +### Centos 7.4 Docker Deployment Example + +```Dockerfile +# For CentOS 7: +FROM centos:7.4.1708 + +# Download certificate and key from the customer portal (https://my.f5.com) +# and copy to the build context: +COPY nginx-repo.crt nginx-repo.key /etc/ssl/nginx/ + +# Install prerequisite packages: +RUN yum -y install wget ca-certificates epel-release + +# Add NGINX Plus, NGINX App Protect DoS and NGINX App Protect repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-7.4.repo +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-dos-7.repo +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-7.repo + +# Install NGINX App Protect DoS and NGINX App Protect: +RUN yum -y install app-protect-dos app-protect\ + && yum clean all \ + && rm -rf /var/cache/yum \ + && rm -rf /etc/ssl/nginx + + +# Copy configuration files: 
+COPY nginx.conf custom_log_format.json /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD /root/entrypoint.sh && tail -f /dev/null +``` + +### RHEL 7.4 Docker Deployment Example + +```Dockerfile +# For Red Hat 7.4+: +FROM registry.access.redhat.com/rhel7:7.4 + +# Download certificate and key from the customer portal (https://my.f5.com) +# and copy to the build context: +COPY nginx-repo.crt nginx-repo.key /etc/ssl/nginx/ + +# Setup the Red Hat subscription +RUN subscription-manager register --force --org=${RHEL_ORG} --activationkey=${RHEL_ACTIVATION_KEY} +RUN subscription-manager refresh +RUN subscription-manager attach --auto + +# Install prerequisite packages +RUN yum -y install wget ca-certificates + +# Install dependencies +RUN subscription-manager repos --enable rhel-*-optional-rpms \ + --enable rhel-*-extras-rpms \ + --enable rhel-ha-for-rhel-*-server-rpms +RUN yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + +# Add NGINX Plus, NGINX App Protect DoS and NGINX App Protect repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-7.4.repo +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-dos-7.repo +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-7.repo + +# Install NGINX App Protect DoS and NGINX App Protect: +RUN yum -y install app-protect-dos app-protect\ + && yum clean all \ + && rm -rf /var/cache/yum \ + && rm -rf /etc/ssl/nginx + +# Copy configuration files: +COPY nginx.conf custom_log_format.json /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD /root/entrypoint.sh && tail -f /dev/null +``` + +### Debian 10 (Buster) / Debian 11 (Bullseye) / Debian 12 (Bookworm) Docker Deployment Example + +```Dockerfile + +ARG OS_CODENAME +# Where OS_CODENAME can be: buster/bullseye/bookworm + +FROM debian:${OS_CODENAME} + +# Download certificate and key from the customer portal (https://my.f5.com) +# and copy to the build context: +COPY nginx-repo.crt 
nginx-repo.key /etc/ssl/nginx/ + +# Install prerequisite packages: +RUN apt-get update && apt-get install -y apt-transport-https lsb-release ca-certificates wget gnupg2 debian-archive-keyring + +# Download and add the NGINX signing key: +RUN wget -qO - https://cs.nginx.com/static/keys/nginx_signing.key | gpg --dearmor | tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null + +# Add NGINX Plus, NGINX App Protect and NGINX App Protect DoS repository: +RUN printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] https://pkgs.nginx.com/plus/debian `lsb_release -cs` nginx-plus\n" | tee /etc/apt/sources.list.d/nginx-plus.list +RUN printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] https://pkgs.nginx.com/app-protect-dos/debian `lsb_release -cs` nginx-plus\n" | tee /etc/apt/sources.list.d/nginx-app-protect-dos.list +RUN printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] https://pkgs.nginx.com/app-protect/debian `lsb_release -cs` nginx-plus\n" | tee /etc/apt/sources.list.d/nginx-app-protect.list + +# Download the apt configuration to `/etc/apt/apt.conf.d`: +RUN wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx + +# Update the repository and install the most recent version of the NGINX App Protect DoS and NGINX App Protect package (which includes NGINX Plus): +RUN apt-get update && apt-get install -y app-protect-dos app-protect + +# Remove nginx repository key/cert from docker +RUN rm -rf /etc/ssl/nginx + +# Copy configuration files: +COPY nginx.conf custom_log_format.json /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD /root/entrypoint.sh && tail -f /dev/null +``` + +### Ubuntu 18.04 (Bionic) / 20.04 (Focal) / 22.04 (Jammy) / 24.04 (Noble) Docker Deployment Example + +```Dockerfile + +ARG OS_CODENAME +# Where OS_CODENAME can be: bionic/focal/jammy/noble + +FROM ubuntu:${OS_CODENAME} + +ARG DEBIAN_FRONTEND=noninteractive + +# Download certificate and key from the customer portal 
(https://my.f5.com) +# and copy to the build context: +COPY nginx-repo.crt nginx-repo.key /etc/ssl/nginx/ + +# Install prerequisite packages: +RUN apt-get update && apt-get install -y apt-transport-https lsb-release ca-certificates wget gnupg2 ubuntu-keyring + +# Download and add the NGINX signing key: +RUN wget -qO - https://cs.nginx.com/static/keys/nginx_signing.key | gpg --dearmor | tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null + +# Add NGINX Plus, NGINX App Protect and NGINX App Protect DoS repository: +RUN printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] https://pkgs.nginx.com/plus/ubuntu `lsb_release -cs` nginx-plus\n" | tee /etc/apt/sources.list.d/nginx-plus.list +RUN printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] https://pkgs.nginx.com/app-protect-dos/ubuntu `lsb_release -cs` nginx-plus\n" | tee /etc/apt/sources.list.d/nginx-app-protect-dos.list +RUN printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] https://pkgs.nginx.com/app-protect/ubuntu `lsb_release -cs` nginx-plus\n" | tee /etc/apt/sources.list.d/nginx-app-protect.list + +# Download the apt configuration to `/etc/apt/apt.conf.d`: +RUN wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx + +# Update the repository and install the most recent version of the NGINX App Protect DoS and NGINX App Protect package (which includes NGINX Plus): +RUN apt-get update && apt-get install -y app-protect-dos app-protect + +# Remove nginx repository key/cert from docker +RUN rm -rf /etc/ssl/nginx + +# Copy configuration files: +COPY nginx.conf custom_log_format.json /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD /root/entrypoint.sh && tail -f /dev/null +``` + +## NGINX App Protect DoS Arbitrator + +### Overview + +NGINX App Protect DoS arbitrator orchestrates all the running NGINX App Protect DoS instances to synchronize local/global attack start/stop. 
+ +NGINX App Protect DoS arbitrator serves as a central coordinating component for managing multiple instances of App Protect DoS in a network. It is needed when there are more than one NGINX App Protect DoS instances. Its primary function is to ensure that all instances are aware of and share the same state for each protected object. Here's a clearer breakdown of how it works and why it's necessary: + +How NGINX App Protect DoS Arbitrator Works: + +- **Collecting State Periodically**: The arbitrator regularly collects the state information from all running instances of App Protect DoS. This collection occurs at set intervals, typically every 10 seconds. +- **State Initialization for New Instances**: When a new App Protect DoS instance is created, it doesn't start with a blank or uninitialized state for a protected object. Instead, it retrieves the initial state for the protected object from the arbitrator. +- **Updating State in Case of an Attack**: If an attack is detected by one of the App Protect DoS instances, that instance sends an attack notification to the arbitrator. The arbitrator then updates the state of the affected protected object to indicate that it is under attack. Importantly, this updated state is propagated to all other instances. + +### Why NGINX App Protect DoS Arbitrator is Necessary + +NGINX App Protect DoS Arbitrator is essential for several reasons: + +- **Global State Management**: Without the arbitrator, each individual instance of App Protect DoS would manage its own isolated state for each protected object. This isolation could lead to inconsistencies. For example, if instance A declared an attack on a protected object named "PO-Example," instance B would remain unaware of this attack, potentially leaving the object vulnerable. 
+- **Uniform Attack Detection**: With the arbitrator in place, when instance A detects an attack on "PO-Example" and reports it to the arbitrator, the state of "PO-Example" is immediately updated to indicate an attack. This means that all instances, including instance B, are aware of the attack and can take appropriate measures to mitigate it. + +In summary, NGINX App Protect DoS Arbitrator acts as a central coordinator to maintain a consistent and up-to-date global state for protected objects across multiple instances of App Protect DoS. This coordination helps ensure that attacks are properly detected and mitigated, and that knowledge gained by one instance is efficiently shared with others, enhancing the overall security of the network. + + +### NGINX App Protect DoS Arbitrator Deployment + +1. Pull the official NGINX App Protect DoS Arbitrator image with the command: + + ```shell + docker pull docker-registry.nginx.com/nap-dos/app_protect_dos_arb:latest + ``` + +2. Create a container based on this image, for example, `app_protect_dos_arb` container: + + ```shell + docker run --name app_protect_dos_arb -p 3000:3000 -d docker-registry.nginx.com/nap-dos/app_protect_dos_arb + ``` + +3. Verify that the `app_protect_dos_arb` container is up and running with the `docker ps` command. + +4. DNS records are required for NGINX App Protect DoS Arbitrator to work properly and be accessible by NGINX App Protect DoS servers. Ensure that the `svc-appprotect-dos-arb` or configured Arbitrator FQDN (with `app_protect_dos_arb_fqdn` directive) has a valid DNS resolution. +This step is necessary only for VM/Docker deployments with arbitrator. When the arbitrator is in the same Kubernetes namespace as NGINX App Protect DoS, this step is not needed. + +### Multi-VM Deployment + +The Arbitrator service is standalone. Once it is down, it can be seamlessly re-started. 
It will immediately recover all the needed information from NGINX App Protect DoS instances that communicate to it every 10 sec. Its downtime is around 10-20 seconds, which does not affect NGINX App Protect DoS operation. + +NGINX App Protect DoS Arbitrator service connects to port 3000 and can be seen under App Protect DoS instances. All modules try to connect to this service automatically. If it’s not accessible, each instance works in standalone mode. + +There is no authentication mechanism (such as mTLS or a password) between NGINX App Protect DoS servers and the Arbitrator service. Currently the Arbitrator service is not exposed outside of the namespace. It is the customer's responsibility to isolate it from the outside. This applies to any deployment of the Arbitrator, not only to multi-VM. + +## Post-Installation Checks + +You can run the following commands to ensure that NGINX App Protect DoS enforcement is operational. + +1. Check that the three processes needed for NGINX App Protect DoS are running using `ps aux`: + + - admd + - nginx: master process + - nginx: worker process + + ```shell + USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND + nginx 7759 0.0 0.0 113120 1200 ? Ss Sep06 0:00 /bin/sh -c /usr/bin/admd -d --log info > /var/log/adm/admd.log 2>&1 + root 7765 0.0 0.0 87964 1464 ? Ss Sep06 0:00 nginx: master process /usr/sbin/nginx -g daemon off; + nginx 7767 0.0 0.1 615868 8188 ? Sl Sep06 0:04 nginx: worker process + ``` + +2. 
Verify that there are no NGINX errors in the `/var/log/nginx/error.log` and that the policy compiled successfully: + + ```shell + 2020/09/07 15:33:44 [notice] 9307#9307: using the "epoll" event method + 2020/09/07 15:33:44 [notice] 9307#9307: nginx/1.19.0 (nginx-plus-r22) + 2020/09/07 15:33:44 [notice] 9307#9307: built by gcc 4.8.5 20150623 (Red Hat 4.8.5-39) (GCC) + 2020/09/07 15:33:44 [notice] 9307#9307: OS: Linux 3.10.0-327.28.3.el7.x86_64 + 2020/09/07 15:33:44 [notice] 9307#9307: getrlimit(RLIMIT_NOFILE): 1024:4096 + 2020/09/07 15:33:44 [notice] 9310#9310: start worker processes + 2020/09/07 15:33:44 [notice] 9310#9310: start worker process 9311 + PID <9311>, WORKER <0>, Function adm_ngx_init_process, line 684, version: 22+1.19.4-1.el7.ngx + ``` + +3. Check that by applying an attack, the attacker IP addresses are blocked while the good traffic pass through: + + a. Simulate good traffic: + + ```shell + echo "Start Good Traffic 2" + while true; do + curl ${VS}/good1 & + curl ${VS}/good2 & + curl ${VS}/good3 & + curl ${VS}/good4 + sleep 0.1 + done & + ``` + + b. 
After 7 minutes start the attack: + + ```shell + while [ true ] + do + ab -B ${BAD_IP1} -l -r -n 1000000 -c 150 -d -H "Host: evil.net" -H "Pragma: no-cache" -H "Cache-Control: no-cache" -H "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8" -H "Upgrade-Insecure-Requests: 1" -H "User-Agent: WireXBot" -H "x-requested-with:" -H "Referer: http://10.0.2.1/none.html" -H "Accept-Encoding: gzip, deflate" -H "Accept-Language: en-US" http://${VS}/ & + ab -B ${BAD_IP2} -l -r -n 1000000 -c 150 -d -H "Host: evil.net" -H "Pragma: no-cache" -H "Cache-Control: no-cache" -H "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8" -H "Upgrade-Insecure-Requests: 1" -H "User-Agent: WireXBot" -H "x-requested-with:" -H "Referer: http://10.0.2.1/none.html" -H "Accept-Encoding: gzip, deflate" -H "Accept-Language: en-US" http://${VS}/ & + ab -B ${BAD_IP3} -l -r -n 1000000 -c 150 -d -s 10 -H "Host: evil.net" -H "Pragma: no-cache" -H "Cache-Control: no-cache" -H "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8" -H "Upgrade-Insecure-Requests: 1" -H "User-Agent: WireXBot" -H "x-requested-with:" -H "Referer: http://10.0.2.1/none.html" -H "Accept-Encoding: gzip, deflate" -H "Accept-Language: en-US" http://${VS}/ + + killall ab + done + ``` + + c. See that the good traffic continue as usual while the attackers receive denial of service. + +To check NGINX App Protect WAF along side NGINX App Protect DoS, just perform the normal tests as specified at [Admin Guide](https://docs.nginx.com/nginx-app-protect/admin-guide/) + +### Compatibility with NGINX Plus Releases + +A threat campaign package is compatible with the NGINX Plus release supported during the time the threat campaign package was released and with all future releases from that point in time on. In other words, it is not compatible with earlier App Protect DoS releases. 
Those older releases are not supported at this point in time so you will have to upgrade App Protect DoS to benefit from the support which includes Threat campaigns updates. + +## Upgrading App Protect DoS + +You can upgrade to the latest NGINX Plus and App Protect DoS versions by downloading and installing the latest NGINX App Protect DoS package. When upgrading from this package, App Protect DoS will be uninstalled and reinstalled. The old default security policy is deleted and the new default security policy is installed. If you have created a custom security policy, the policy persists and you will need to update `nginx.conf` and point to the custom security policy by referencing the json file (using the full path). + +If you upgrade your NGINX version outside of the App Protect DoS module, App Protect DoS will be uninstalled and you will need to reinstall it. You need to restart NGINX after an upgrade. + +## SELinux + +The default settings for Security-Enhanced Linux (SELinux) on modern Red Hat Enterprise Linux (RHEL) and related Linux distributions can be very strict, erring on the side of security rather than convenience. + +Although the App Protect DoS applies its SELinux policy module during installation, your specific configuration might be blocked unless you adjust the policy or modify file labels. 
+ +### Modifying File Labels + +For example, if you plan to store your log configuration files in `/etc/logs` - you should change the default SELinux file context for this directory: + +```shell +semanage fcontext -a -t httpd_config_t /etc/logs +restorecon -Rv /etc/logs +``` + +### Syslog to Custom Port + +If you want to send logs to some unreserved port, you can use semanage to add the desired port (here, 35514) to the syslogd_port_t type: + +```shell +semanage port -a -t syslogd_port_t -p tcp 35514 +``` + +Review the syslog ports by entering the following command: + +```shell +semanage port -l | grep syslog +``` + +### Kubernetes Deployment Examples + +#### App Protect DoS + +`appprotect-dos.yaml`: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: appprotect-dos + namespace: appprotect-dos-wp-diff + labels: + app: appprotect-dos +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: appprotect-dos + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 2 + maxUnavailable: 1 + template: + metadata: + labels: + app: appprotect-dos + spec: + containers: + - name: centos-bados + image: example.com/centos_app_protect_dos_r24:latest + imagePullPolicy: Always + resources: + requests: + cpu: "200m" + memory: "500Mi" + limits: + cpu: "900m" + memory: "800Mi" + ports: + - containerPort: 80 + name: web + - containerPort: 8090 + name: probe + - containerPort: 8091 + name: probe500 + livenessProbe: + httpGet: + path: /app_protect_dos_liveness + port: 8090 + initialDelaySeconds: 0 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /app_protect_dos_readiness + port: 8090 + initialDelaySeconds: 0 + periodSeconds: 10 + volumeMounts: + - name: shared + mountPath: /shared/ + - name: conf + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + - name: root-script + mountPath: /root/entrypoint.sh + subPath: entrypoint.sh + - name: log-default + mountPath: /etc/app_protect_dos/log-default.json + subPath: log-default.json + 
volumes: + - name: shared + persistentVolumeClaim: + claimName: pvc-appprotect-dos-shared + - name: conf + configMap: + name: cm-appprotect-dos-nginx + items: + - key: nginx.conf + path: nginx.conf + - name: root-script + configMap: + name: cm-appprotect-dos-entry + defaultMode: 0755 + items: + - key: entrypoint.sh + path: entrypoint.sh + - name: log-default + configMap: + name: cm-appprotect-dos-log-default + defaultMode: 0755 + items: + - key: log-default.json + path: log-default.json +``` + +`svc-appprotect-dos.yaml`: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: svc-appprotect-dos + namespace: appprotect-dos-wp-diff + labels: + app: appprotect-dos +spec: + ports: + - name: app + port: 80 + protocol: TCP + nodePort: 80 + selector: + app: appprotect-dos + type: NodePort +``` + +`log-default.json`: + +```json +{ + "filter": { + "traffic-mitigation-stats": "all", + "bad-actors": "all", + "attack-signatures": "all" + } +} +``` + +`entrypoint.sh`: + +```shell +#!/usr/bin/env bash +USER=nginx +LOGDIR=/var/log/adm + +# prepare environment +mkdir -p /var/run/adm /tmp/cores ${LOGDIR} +chmod 755 /var/run/adm /tmp/cores ${LOGDIR} +chown ${USER}:${USER} /var/run/adm /tmp/cores ${LOGDIR} + +# run processes +/bin/su -s /bin/bash -c "/usr/bin/adminstall > ${LOGDIR}/adminstall.log 2>&1" ${USER} +/usr/sbin/nginx -g 'daemon off;' & +/bin/su -s /bin/bash -c "/usr/bin/admd -d --log info > ${LOGDIR}/admd.log 2>&1 &" ${USER} +``` + +`install.sh`: + +```shell +#!/bin/bash +set -ex +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +kubectl -n appprotect-dos-wp-diff create cm cm-appprotect-dos-nginx --from-file ${DIR}/nginx.conf +kubectl -n appprotect-dos-wp-diff create cm cm-appprotect-dos-entry --from-file ${DIR}/entrypoint.sh +kubectl -n appprotect-dos-wp-diff create cm cm-appprotect-dos-log-default --from-file ${DIR}/log-default.json +kubectl create -f ${DIR}/appprotect-dos.yaml +#kubectl create -f ${DIR}/svc-appprotect-dos.yaml +``` + 
+`nginx.conf`: + +```nginx +user nginx; +worker_processes 1; +error_log /var/log/nginx/error.log debug; +worker_rlimit_nofile 65535; +working_directory /tmp/cores; + +load_module modules/ngx_http_app_protect_dos_module.so; + +events { + worker_connections 65535; +} +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format log_napd ', vs_name_al=$app_protect_dos_vs_name, ip=$remote_addr, tls_fp=$app_protect_dos_tls_fp, ' + 'outcome=$app_protect_dos_outcome, reason=$app_protect_dos_outcome_reason, ' + 'ip_tls=$remote_addr:$app_protect_dos_tls_fp, '; + + app_protect_dos_security_log_enable on; + app_protect_dos_security_log "/etc/app_protect_dos/log-default.json" /var/log/adm/logger.log; + # app_protect_dos_security_log "/etc/app_protect_dos/log-default.json" syslog:server=1.2.3.4:5261; + + server { + listen 80 reuseport; + server_name serv; + + set $loggable '0'; + access_log /var/log/nginx/access.log log_napd if=$loggable; + # access_log syslog:server=1.1.1.1:5561 log_napd if=$loggable; + + app_protect_dos_policy_file "/etc/app_protect_dos/BADOSDefaultPolicy.json"; + + location / { + app_protect_dos_enable on; + app_protect_dos_name "App1"; + app_protect_dos_monitor uri=http://serv:80/ protocol=http1; + proxy_pass http://1.2.3.4:80; + } + } + + server { + listen 8090; + server_name probe; + + app_protect_dos_liveness on; # uri:/app_protect_dos_liveness port:8090 + app_protect_dos_readiness on; # uri:/app_protect_dos_readiness port:8090 + + location / { + proxy_pass http://localhost:8091; + } + } + + server { + listen 8091; + return 503; + } + + sendfile on; + tcp_nopush on; + keepalive_timeout 65; +} +``` + +#### App Protect DoS arb + +Arbitrator (arb) is an internal service that is essential for the scaling scenarios. The arbitrator service should be deployed in the same namespace as NGINX App Protect DoS. 
+ +`appprotect-dos-arb.yaml`: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: appprotect-dos-arb + namespace: appprotect-dos-wp-diff +spec: + replicas: 1 + selector: + matchLabels: + app: appprotect-dos-arb + template: + metadata: + labels: + app: appprotect-dos-arb + spec: + containers: + - name: arb-svc + image: example.com/app_protect_dos_arb:latest + resources: + requests: + cpu: "200m" + memory: "500Mi" + limits: + cpu: "900m" + memory: "800Mi" + ports: + - containerPort: 3000 +``` + +`svc-appprotect-dos-arb.yaml`: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: svc-appprotect-dos-arb + namespace: appprotect-dos-wp-diff +spec: + selector: + app: appprotect-dos-arb + ports: + - name: arb + port: 3000 + protocol: TCP + targetPort: 3000 + clusterIP: None +``` + +`install_appprotect-arb.sh`: + +```shell +#!/bin/bash + +set -ex +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +kubectl -n appprotect-dos-wp-diff apply -f ${DIR}/appprotect-dos-arb.yaml +kubectl -n appprotect-dos-wp-diff apply -f ${DIR}/svc-appprotect-dos-arb.yaml +``` + +`install NGINX App Protect DoS with ARB service`: + +```shell +#!/bin/bash + +set -ex +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +kubectl create ns appprotect-dos-wp-diff +${DIR}/appprotect-dos/install.sh +${DIR}/appprotect-dos-arb/install_appprotect-dos-arb.sh +``` diff --git a/content/nap-dos/directives-and-policy/_index.md b/content/nap-dos/directives-and-policy/_index.md new file mode 100644 index 000000000..c4cc3e3ea --- /dev/null +++ b/content/nap-dos/directives-and-policy/_index.md @@ -0,0 +1,10 @@ +--- +description: Learn about the Directives and Policy attributes necessary to configure + F5 NGINX App Protect DoS. +menu: + docs: + parent: NGINX App Protect DoS Documentation. 
+title: Directives and Policy +weight: 120 +url: /nginx-app-protect-dos/directives-and-policy/ +--- diff --git a/content/nap-dos/directives-and-policy/learn-about-directives-and-policy.md b/content/nap-dos/directives-and-policy/learn-about-directives-and-policy.md new file mode 100644 index 000000000..bc49f731e --- /dev/null +++ b/content/nap-dos/directives-and-policy/learn-about-directives-and-policy.md @@ -0,0 +1,527 @@ +--- +title: NGINX App Protect DoS Directives and Policy +toc: true +weight: 120 +docs: DOCS-667 +--- + +## Introduction + +NGINX directives are specified in the `nginx.conf` file and are used to configure various modules of NGINX.
      +F5 NGINX App Protect DoS has its own set of directives, which follow the same rules as other NGINX directives, and are used to enable and configure its features.
      + +The table below provides a summary of all the F5 NGINX App Protect DoS directives.
      + +While only the first directive is mandatory for enabling NGINX App Protect DoS, it is recommended to use as many directives as possible to leverage the product’s full range of monitoring and application health detection capabilities. After adding these directives, ensure you reload NGINX and check the error log for any errors or warnings.
      + +## Directives table +Below is a summary of all NGINX App Protect DoS directives. Detailed descriptions of each directive can be found in the following sections. + + {{}} + +| Directive syntax | Options | Context | Description | Mandatory | Default | +|-----------------------------------------------------------------------------------------------------------|----------|----------|--------------|------------|-------------------------------------------------------------------------------------------------------------------------------------| +| [app_protect_dos_enable](#enable-directive-app_protect_dos_enable) | [on\|off] | http,
      server,
      location | Enable/Disable DoS protection | Yes | off | +| [app_protect_dos_policy_file](#policy-directive-app_protect_dos_policy_file) | [FILE-PATH] | http,
      server,
      location | Load DoS configuration from a policy file | No | `/etc/app_protect_dos/BADOSDefaultPolicy.json` | +| [app_protect_dos_name](#service-name-directive-app_protect_dos_name) | [SERVICE-NAME] | http,
      server,
      location | Name of protected object | No | **line_num-server_name**:*seq*-location_name

      (i.e. `30-backend:1-/abc`) | +| [app_protect_dos_monitor](#monitor-directive-app_protect_dos_monitor) | [uri=X] [protocol=Y] [timeout=Z] [proxy_protocol \| proxy_protocol=on\|off] | http,
      server,
      location | URI to monitor server's stress. Protocol and timeout are optional | Yes, unless it's regular http1 traffic | uri - None&#10;
      protocol - http1
      timeout - 10 seconds for http1/websocket; 5 seconds for http2/grpc&#10;
      proxy_protocol - off | +| [app_protect_dos_security_log_enable](#security-log-enable-directive-app_protect_dos_security_log_enable) | [on\|off] | http,
      server,
      location | Enable/Disable security logger | No | off | +| [app_protect_dos_security_log](#security-log-directive-app_protect_dos_security_log) | [LOG-CONFIG-FILE] [DESTINATION] | http,
      server,
      location | Security logger configuration. Second argument:
      "syslog:server={ip}:{port}" or
      "stderr" or
      "{absolute_file_path}" | No | `/etc/app_protect_dos/log-default.json stderr` | +| [app_protect_dos_liveness](#liveness-probe-directive-app_protect_dos_liveness) | [on\|off] [uri:URI] [port:PORT] | http | Liveness probe. Second and third arguments are optional | No | `off uri:/app_protect_dos_liveness port:8090` | +| [app_protect_dos_readiness](#readiness-probe-directive-app_protect_dos_readiness) | [on\|off] [uri:URI] [port:PORT] | http | Readiness probe. Second and third arguments are optional | No | `off uri:/app_protect_dos_readiness port:8090` | +| [app_protect_dos_arb_fqdn](#arbitrator-fqdn-directive-app_protect_dos_arb_fqdn) | [FQDN\|IP address] | http | Arbitrator FQDN/IP address | No | `svc-appprotect-dos-arb` | +| [app_protect_dos_api](#api-directive-app_protect_dos_api) | No arguments | location | Monitoring via REST API (also includes the dashboard) | No | off | +| [app_protect_dos_accelerated_mitigation](#accelerated-mitigation-directive-app_protect_dos_accelerated_mitigation) | [on\|off] [syn_drop=on\|off]| http | Enable/Disable L4 accelerated mitigation. Second argument is optional | No | off syn_drop=off | +| [app_protect_dos_access_file](#access-file-directive-app_protect_dos_access_file) | [FILE-PATH] | http,&#10;
      server,
      location | Define allowlist policy from a file | No | None / disabled | + +{{
      }} + + +## Directives Info + +### Enable directive (`app_protect_dos_enable`) + +Enables/disables App Protect DoS module in the relevant block/s.
      +It can be written in the following contexts: `location/server/http`. + +The derived blocks/contexts also inherit the directive. +**For example:** A directive written in `http` context will be considered as if written also in all of the http's server blocks and their location blocks. + +In case of multiple directives in different contexts, the derived overwrites the base's directive. + + {{}} + +| Config | Expected | +|------- | -------- | +| Http block: directive is **on**
      Server block: none is written
      Location-1 block: none is written
      Location-2 block: none is written | VS1: the server block
      VS2: location-1 block
      VS3: location-2 block | +| Server block: directive is **on**
      Location-1 block: directive is **off**
      Location-2 block: none is written | VS1: the server block
      VS2: location-2 block | +| Http block: directive is **on**
      Server block: directive is **off**
      Location-1 block: directive is **on**
      Location-2 block: none is written | VS1: location-1 block | + + {{
      }} + + **Example:** + +```nginx +app_protect_dos_enable on; +``` + +### Policy directive (`app_protect_dos_policy_file`) + +This is the path to the JSON policy file which includes the product's configuration parameters. It can be written in the following contexts: `location/server/http`. + +The directive is optional. If not inserted then default path will be used, which is `/etc/app_protect_dos/BADOSDefaultPolicy.json`. + +If the configuration file doesn't exist or its attributes are invalid, default values will be used. + +`BADOSDefaultPolicy.json`: + +```json +{ + "mitigation_mode": "standard", + "signatures": "on", + "bad_actors": "on", + "automation_tools_detection": "on", + "tls_fingerprint" : "on" +} +``` + +{{}} + +| Parameter name | Values | Default | Description | +|:--------------- |:------- |:--------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| mitigation_mode | standard / conservative / none | standard| **Standard** - module allowed to use global rate mitigation
      **Conservative** - module is not allowed to use global rate but only Signatures/Bad Actors mitigation
      **None** - module is not allowed to mitigate. Only to learn and report. | +| signatures | [on\|off] | on| Enable Signatures mechanism | +| bad_actors | [on\|off] | on| Enable Bad Actors mechanism | +| automation_tools_detection | [on\|off] | on |Enable the usage of automation tools detection (via cookies and redirect) | +| tls_fingerprint| [on\|off] | on | Enable source identification using TLS fingerprinting| + +{{
      }} + +{{}} + +| Scenario | Result | +|:--------- |:-------- | +| Directive is not written | Default path is used: "/etc/app_protect_dos/BADOSDefaultPolicy.json" | +| Directive is written | Path from the directive is used | +| File not found / file syntax is invalid | Default values are used | + +{{}} + +**Example:** + +```nginx +app_protect_dos_policy_file /etc/app_protect_dos/BADOSPolicy.json; +``` + +### Service Name directive (`app_protect_dos_name`) + +This is the Protected Object (VS) name, which should be unique and is used to identify the Protected Object in the logs.
      +It can be utilized within `location`, `server`, and `http` blocks.
      +
      +Directive is optional. If not written, then each protected object (VS) will have an auto-generated name according to the following syntax: + +`line_number-server_name:seq-location_name` + +**For example:** +`30-backend:1-/abc` + +- `line number:` the line number of the server block (`server {`) in the `nginx.conf` file (i.e. `30`)
      +- `server name:` taken from directive `server_name` (i.e. `backend`)
      +- `seq:` 0 for server block, increments for each location block. i.e. VS created from server block will have 0 and VSs from location blocks will be 1,2,3,... (i.e. `1`) +- `location name:` the name of the location (i.e. `/abc`) + +NGINX App Protect DoS supports up to 300 Protected Objects for versions up to 4.3, and 1,000 Protected Objects in version 4.4 and above.&#10;
      +
      +**Example:** + +```nginx +app_protect_dos_name po-example; +``` + +### Monitor directive (`app_protect_dos_monitor`) + +The `app_protect_dos_monitor` directive is used to monitor the stress level of the Protected Object.
      +Requests for this monitoring are sent from localhost (127.0.0.1) and pass through the NGINX configuration to simulate regular client traffic.
      +This directive is mandatory, except when using the `http1` protocol, where it is still strongly recommended for optimal performance.
      + +**Syntax:**
      +app_protect_dos_monitor uri=path [protocol=http1|http2|grpc|websocket] [timeout=number] [proxy_protocol=on|off]; + +**Arguments**
      +The monitor directive has four arguments - **uri**, **protocol**, **timeout** and **proxy_protocol**. The first is mandatory and the rest are optional. + +- **URI** - The URI of the Protected Object as defined in the `nginx.conf`. This must point to a location block that proxies traffic to the backend (upstream) to ensure accurate monitoring.&#10;
      + Format: **scheme://server_name:port/location**. + + {{< note >}}For gRPC, the URI must specify a valid gRPC method (e.g., /RouteGuide/GetFeature).
      + The health check is not a true gRPC client, so its requests do not conform to the gRPC wire protocol. As a result, the backend responds with grpc-status: 12 (UNIMPLEMENTED), which is expected and treated as a successful health check. Regular gRPC client traffic is unaffected by this behavior.{{< /note >}} + +- **Protocol** - determines the protocol type of the service. Options are `http1 / http2 / grpc / websocket`.
      Default: `http1`.
      + + {{< note >}}HTTP2 and gRPC are supported from NGINX App Protect DoS v2, while WebSocket is supported from NGINX App Protect DoS v4. {{< /note >}} + +- **Timeout** - determines how long (in seconds) NGINX App Protect DoS should wait for a response.&#10;
      Default: 10 seconds for `http1/websocket` and 5 seconds for `http2/grpc`.&#10;
      + +- **Proxy Protocol** - Should be used when the listen directive of the corresponding server block contains the proxy_protocol parameter. + It adds an HAProxy PROXY protocol header to the monitor request. +
      Format is **proxy_protocol | proxy_protocol=on**.
      + Default: off.
      + + {{< note >}}The proxy_protocol is supported from NGINX App Protect DoS v3.1. {{< /note >}} + + +#### For Older Versions (NGINX App Protect DoS v1) + +In NGINX App Protect DoS v1, the app_protect_dos_monitor directive has only one argument: uri. +Only HTTP1 is supported. + +

      +**Examples:** + +1. HTTP/1 on Port 80: + +```nginx +listen 80; +server_name serv; + +location / { + # Protected Object is defined here + app_protect_dos_monitor uri=http://serv:80/; +} +``` + +{{< note >}}For NGINX App Protect DoS v1, use: app_protect_dos_monitor ; {{< /note >}} + +2. HTTP/2 Over SSL + +```nginx +listen 443 http2 reuseport ssl; +server_name serv; + +location / { + # Protected Object is defined here + app_protect_dos_monitor uri=https://serv:443/ protocol=http2 timeout=5; +} +``` + +3. gRPC Service on Port 50051 + +```nginx +listen 50051 http2 reuseport; +server_name my_grpc; + +location /routeguide. { + # Protected Object is defined here + # Note: The URI must include a valid gRPC method (e.g., /routeguide.RouteGuide/GetFeature). + # The health check will expect a grpc-status of 12 (UNIMPLEMENTED) because it is not a true gRPC client. + app_protect_dos_monitor uri=http://my_grpc:50051/routeguide.RouteGuide/GetFeature protocol=grpc; +} +``` + +4. Server with Proxy Protocol + +```nginx +listen 443 ssl http2 proxy_protocol; +server_name serv; + +location / { + # Protected Object is defined here + # Note: Use proxy_protocol=on if the listen directive includes the "proxy_protocol" parameter. + app_protect_dos_monitor uri=https://serv:443/ protocol=http2 timeout=5 proxy_protocol=on; +} + +location /abc { + # Protected Object is defined here + app_protect_dos_monitor uri=https://serv:443/abc protocol=http2 timeout=5 proxy_protocol; +} +``` + +5. 
WebSocket service + +```nginx +listen 80; +server_name wsserv; + +location /app/ { + # WebSocket configuration required by NGINX + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $host; + + # Protected Object is defined here + app_protect_dos_monitor uri=http://wsserv:80/app/ protocol=websocket; +} +``` + +### Security log enable directive (`app_protect_dos_security_log_enable`) + +Enable/Disable App Protect DoS security logger. It can be used in `location/server/http` blocks. + +Directive is optional. If not written, then logger is disabled. + +
      **Example:** +```nginx +app_protect_dos_security_log_enable on; +``` + +### Security log directive (`app_protect_dos_security_log`) + +This directive has two string arguments. + +First argument is the configuration file path, i.e. `/etc/app_protect_dos/log-default.json`. + +Second argument is the destination (the location which the events will be sent to). The destination can be one of three options: + +- `syslog:server={ip}:{port}`, i.e. `syslog:server=1.2.3.4:3000` +- `stderr` (**default**) +- `{absolute_file_path}`, i.e. `/shared/dos_sec_logger.log` + +Implemented according to: [NGINX App Protect DoS Security Log]({{< relref "/nap-dos/monitoring/security-log.md" >}}) + + {{< note >}} + +- When using stderr, make sure that the process `admd` is not redirecting the stderr output to file. +- When using the Docker `entrypoint.sh` startup script from the admin guide, make sure that it doesn’t redirect stderr. + {{< /note >}} + + +**Examples:** + +- **Syslog:** + +```nginx +app_protect_dos_security_log "/etc/app_protect_dos/log-default.json" syslog:server=1.2.3.4:5000; +``` + +- **File:** + +```nginx +app_protect_dos_security_log "/etc/app_protect_dos/log-default.json" /shared/logger.log; +``` + +- **Stderr:** + +```nginx +app_protect_dos_security_log "/etc/app_protect_dos/log-default.json" stderr; +``` + +While `/etc/app_protect_dos/log-default.json` is: + +```json +{ + "filter": { + "traffic-mitigation-stats": "all", + "bad-actors": "top 10", + "attack-signatures": "top 10" + } +} +``` + +### Liveness probe directive (`app_protect_dos_liveness`) + +This directive has 3 arguments. + +{{}} + +| First argument | Second argument | Third argument | +| :-------------- | :--------------- | :-------------- | +| [on\|off] depending if this feature should be enabled or disabled. | URI Syntax is: `uri:___` | Port Syntax is: `port:____` | + +{{}} + + {{< note >}} +Second and Third arguments are optional; if one or more is not written, the default will take place. 
+ {{< /note >}} + +If liveness is enabled, a request with URI and PORT that matches the probe configuration (i.e. `/app_protect_dos_liveness:8090`) will be answered with RC 200 "Alive" by our NGINX module, without being counted or pass to other handlers nor the backend server. + +Any other response will indicate that our NGINX module (NGINX App Protect DoS) has not received the request (possibly means that NGINX is down). + +**Example:** + +```nginx +app_protect_dos_liveness on uri:/liveness port:8090; +``` + +### Readiness probe directive (`app_protect_dos_readiness`) + +This directive has 3 arguments. + +{{}} + +| First argument | Second argument | Third argument | +| :-------------- | :--------------- | :-------------- | +| [on\|off] depending if this feature should be enabled or disabled. | URI Syntax is: `uri:___` | Port Syntax is: `port:____` | + +{{}} + + + {{< note >}} +Second and Third arguments are optional; if one or more is not written, the default will take place. + {{< /note >}} + +If readiness is enabled, a request with URI and PORT that matches the probe configuration (i.e. `/app_protect_dos_readiness:8090`) will be answered with RC 200 "Ready" or RC 503 "Not Ready" by our NGINX module, without being counted or pass to other handlers nor the backend server. + +Any other response will indicate that our NGINX module (NGINX App Protect DoS) has not received the request (possibly means that NGINX is down). + +RC 200 "Ready" will occur if two conditions are met: + +1. NGINX worker successfully connected to the global shared memory block +2. ADMD process is running (and not stuck) + +**Example:** + +```nginx +app_protect_dos_readiness on uri:/readiness port:8090; +``` + +### Arbitrator FQDN directive (`app_protect_dos_arb_fqdn`) + +Arbitrator FQDN directive has one argument which is the `FQDN/IP`. + +The argument is the FQDN to the desired Arbitrator. 
+ +**Examples:** + +FQDN: + +```nginx +app_protect_dos_arb_fqdn svc-appprotect-dos-arb.arb.svc.cluster.local; +``` + +IP address: + +```nginx +app_protect_dos_arb_fqdn 192.168.1.10; +``` + +### API directive (`app_protect_dos_api`) + +This directive is used to enable the App Protect DoS monitoring capability via REST API.
      +The REST API interface provides extended metrics information of the Protected Objects. +It can be used by sending REST API requests manually or by using the App Protect DoS dashboard page. + +For more information refer to [NGINX App Protect DoS Live Activity Monitoring]({{< relref "/nap-dos/monitoring/live-activity-monitoring.md" >}}) + +**Example:** + + ```nginx + http { + # ... + server { + listen 192.168.1.23; + # ... + location /api { + app_protect_dos_api; + allow 192.168.1.0/24; + deny all; + } + location = /dashboard-dos.html { + root /usr/share/nginx/html; + } + } + } + ``` + +### Accelerated mitigation directive (`app_protect_dos_accelerated_mitigation`) + +This directive is used to enable or disable App Protect DoS L4 accelerated mitigation.
      + +syn_drop is an optional parameter; the default value is "off".
      +syn_drop=on mode is applicable for plain HTTP services or HTTPS when the `tls_fingerprint` feature is disabled. Refer to policy parameter "tls_fingerprint" in [Policy directive](#policy-directive-app_protect_dos_policy_file). +In syn_drop mode, the SYN packet of detected bad actors will be dropped. + +syn_drop mode is recommended for the deployments of NGINX App Protect DoS at the perimeter network or behind L3 load balancer. +Using this mode when NGINX App Protect DoS is deployed behind L4/L7 load balancer may result in the load balancer’s starvation during an attack. + +{{< note >}} +To use this directive you need to install the eBPF package. + +For more information about eBPF, you can read the [Accelerating DDoS Mitigation with eBPF in F5 NGINX App Protect DoS](https://www.f5.com/company/blog/nginx/accelerating-ddos-mitigation-with-ebpf-in-f5-nginx-app-protect-dos) article. + +{{< /note >}} + +**Example:** + +```nginx +app_protect_dos_accelerated_mitigation on syn_drop=on; +``` + +### Access File directive (`app_protect_dos_access_file`) + +The `app_protect_dos_access_file` directive defines an allowlist policy from a specified file.&#10;
      +This enables specifying IP addresses or ranges that should never be blocked.
      +The format of the file is the same as used in NGINX App Protect WAF, making it easy to reuse existing WAF policies with defined allowlist IPs.
      +
+The directive is optional. If it is not specified, the allowlist feature is disabled.
      +
      +The file should include a list of IP addresses or ranges in JSON format. Both IPv4 and IPv6 addresses are supported.
      + +IPv4 addresses are in the format "a.b.c.d" where each component is a decimal number in the range 0-255.
      +IPv6 addresses are in the format "h1:h2:h3:h4:h5:h6:h7:h8" where each component is a hex number in the range 0x0-0xffff. Any contiguous range of zero elements can be omitted and replaced by "::".
      +IPv4 and IPv6 masks are written in the format "IP/xxx" (for example: /24), indicating the number of significant bits.
      +
      +The JSON file should include the ipAddress field for specifying IP addresses or ranges, and the blockRequests field set to "transparent". The file can also include $ref to reference additional files containing more IP addresses.
      +
      +Additionally, a second format is supported where the mask is specified in a dedicated field `ipMask`. The mask should be written in the standard subnet notation for IPv4 and IPv6 addresses. In this format, the `blockRequests` field should have a value of "never" instead of "transparent".
      +
      + +**Example:** +```nginx +app_protect_dos_access_file "/etc/app_protect_dos/allowlist.json"; +``` + +**Example content of /etc/app_protect_dos/allowlist.json:** +```nginx +{ + "policy": { + "ip-address-lists": [ + { + "ipAddresses": [ + { "ipAddress": "1.1.1.1" }, + { "ipAddress": "1.1.1.1/32" }, + { "ipAddress": "3.3.3.0/24" }, + { "ipAddress": "2023::4ef3/128" }, + { "ipAddress": "2034::2300/120" } + ], + "blockRequests": "transparent" + }, + { + "$ref": "/etc/app_protect_dos/additional_ips.json", + "blockRequests": "transparent" + } + ] + } +} +``` + +**Example content of /etc/app_protect_dos/additional_ips.json:** +```nginx +{ + "ipAddresses": [ + { "ipAddress": "2.2.2.2/32" }, + { "ipAddress": "4.4.4.0/24" } + ] +} +``` + +**Example content with second format:** +```nginx +{ + "policy":{ + "whitelist-ips":[ + { + "ipAddress":"2034::2300", + "ipMask":"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ff00", + "blockRequests":"never" + }, + { + "blockRequests":"never", + "ipAddress":"4.4.4.0", + "ipMask":"255.255.255.0" + } + ] + } +} +``` diff --git a/content/nap-dos/monitoring/_index.md b/content/nap-dos/monitoring/_index.md new file mode 100644 index 000000000..2c5f1da8c --- /dev/null +++ b/content/nap-dos/monitoring/_index.md @@ -0,0 +1,10 @@ +--- +description: Documentation explaining how to monitor, generate logs for, and debug + F5 NGINX App Protect DoS. +menu: + docs: + parent: NGINX App Protect DoS Documentation. +title: Monitoring +weight: 130 +url: /nginx-app-protect-dos/monitoring/ +--- diff --git a/content/nap-dos/monitoring/access-log.md b/content/nap-dos/monitoring/access-log.md new file mode 100644 index 000000000..85185e723 --- /dev/null +++ b/content/nap-dos/monitoring/access-log.md @@ -0,0 +1,76 @@ +--- +description: Learn about the F5 NGINX App Protect DoS Request Log Mechanism. 
+docs: DOCS-668 +doctypes: +- task +title: NGINX App Protect DoS Access Log Request Mechanism +toc: true +weight: 160 +--- + +## Access Logs +Access Log is NGINX’s request log mechanism. It is controlled by the following two directives.
      + +### log_format +This directive determines the format of the log messages using predefined variables. App Protect DoS will enrich this set of variables with several security log attributes that are available to be included in the `log_format`. If `log_format` is not specified then the built-in format `combined` is used but, because that format does not include the extended App Protect DoS variables, this directive must be used when the user wants to add App Protect DoS information to the log. + +### access_log +This directive determines the destination of the `access_log` and the name of the format according to the official [F5 NGINX documentation](https://docs.nginx.com). + +For example: `access_log /var/log/nginx/access.log log_dos`; (`log_dos` is predefined in the log_format directive). + +## App Protect Variables for Access Log +These are the variables added to Access Log. They are a subset of the Security log attributes. The Security log names are prefixed with `$app_protect_dos`. + +{{}} + +|Name| Meaning |Comment| +|--- |------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------ | +|`$app_protect_dos_outcome`| One of:
      **Allow**: request was sent to origin server
      **Redirect**: http redirection
      **Challenge**: JS challenge
**Block**: blocked request || +|`$app_protect_dos_outcome_reason`| One of:
      **Allow**: Request not mitigated, passed DoS flow successfully.
      **Allowlist**: Request not mitigated because it is on the allowlist.
      **Bypass**: Request not mitigated due to internal failure.
      **Bad_Actor:**: Request mitigated as a bad actor.
      **Signature**: Request mitigated as a matched DoS attack signature.
      **Global_Rate**: Request mitigated as exceeding the calculated global request rate.
      **Slow_Body**: Request mitigated due to being a slow request. |Combine `MITIGATED_BY_GLOBAL_RATE` with global rate value (in RPS) for example `Global_Rate, value=152,` | +|`$app_protect_dos_tls_fp`| TLS Fingerprint - a value which identifies the sender |Applicable only in TLS (SSL) traffic| +|`$app_protect_dos_policy_name`| The name of the policy that enforced the request || +|`$app_protect_dos_vs_name`| The name of the protected object || +|`$app_protect_dos_version`| The App Protect DoS version string:
      major.minor.build format. |Does not include the F5 NGINX plus version (e.g. R21). The latter is available in `$version` variable.| + +{{
      }} + + {{< note >}} +Many of the other Security log attributes that are not included here have exact or similar parallels among the NGINX variables also available for access log. For example, `$request` is parallel to the `request` security log attribute. See the full list of [NGINX variables](https://nginx.org/en/docs/http/ngx_http_log_module.html). + {{< /note >}} + + +## Logging Rate Limit - mandatory configuration + +During a DoS attack, there is a large quantity of incoming requests which can flood the Access Log. +The rate of the access log's entries can be limited in order to avoid this flood. + +NGINX logs all the requests during peacetime and logs up to 10 entries per second for each outcome reason during attack time. In worst case it can be 50 requests per second under attack. + +Two things should be configured in the `nginx conf` file: + +1. Create a variable called `loggable` using NGINX's `set` directive and give it any value (string or numerical).
      + Note that the scope of the `set` directive is **server** or **location** block.
+ For example: **set $loggable '1'**; + +2. Add the string **"if=$loggable"** to the **access_log** directive's argument. + For example: access_log /var/log/nginx/access.log custom **if=$loggable**; + +## Example + +```nginx +http { + log_format security_dos 'request_time=$request_time client_ip=$remote_addr,' + 'request="$request", status=$status,' + 'dos_policy=$app_protect_dos_policy_name, dos_protected_object=$app_protect_dos_vs_name,' + 'dos_action=$app_protect_dos_outcome, dos_action_reason=$app_protect_dos_outcome_reason'; + + server { + location / { + set $loggable 1; + access_log /var/log/nginx/access.log security_dos if=$loggable; + ... + } + } +} +``` diff --git a/content/nap-dos/monitoring/live-activity-monitoring.md b/content/nap-dos/monitoring/live-activity-monitoring.md new file mode 100644 index 000000000..d8a5b0b80 --- /dev/null +++ b/content/nap-dos/monitoring/live-activity-monitoring.md @@ -0,0 +1,267 @@ +--- +description: Learn about the F5 NGINX App Protect DoS Live Activity Monitoring. +docs: DOCS-1389 +doctypes: +- task +title: NGINX App Protect DoS Live Activity Monitoring +toc: true +weight: 140 +--- + +{{< img src="/dashboard/dos-tab.png" alt="NGINX App Protect DoS Dashboard" >}} + +## Overview + +F5 NGINX App Protect DoS offers a variety of application monitoring tools: + +- **App Protect DoS Dashboard Page**: This dynamic interface provides real-time monitoring and details of Protected Objects. +- **NGINX App Protect DoS REST API**: This interface offers comprehensive metrics about the Protected Objects. + +## Prerequisites + +- NGINX Plus R26 or later is required for accessing the NGINX App Protect DoS REST API and the DoS Dashboard. 
+ +## API Configuration Steps + +To activate the API: + +- Define a `server` block within the `http` context to handle the API: + + ```nginx + http { + server { + # insert your API configuration here + } + } + ``` + +- Create a `location` for API requests and include the `app_protect_dos_api` directive: + + ```nginx + http { + # ... + server { + listen 192.168.1.23; + # ... + location /api { + app_protect_dos_api; + # ... + } + } + } + ``` + +- Limit API location access, for example, permit only local network access using `allow` and `deny` directives: + + ```nginx + http { + # ... + server { + listen 192.168.1.23; + # ... + location /api { + app_protect_dos_api; + allow 192.168.1.0/24; + deny all; + } + } + } + ``` + +- Implement HTTP basic authentication to restrict access to PATCH, POST, and DELETE methods to certain users: + + ```nginx + http { + # ... + server { + listen 192.168.1.23; + # ... + location /api { + limit_except GET { + auth_basic "NGINX Plus API"; + auth_basic_user_file /path/to/passwd/file; + } + app_protect_dos_api; + allow 192.168.1.0/24; + deny all; + } + } + } + ``` + +- Enable the DoS Dashboard by defining the /dashboard-dos.html location, which is typically in the root directory: + + ```nginx + http { + # ... + server { + listen 192.168.1.23; + # ... + location /api { + limit_except GET { + auth_basic "NGINX Plus API"; + auth_basic_user_file /path/to/passwd/file; + } + app_protect_dos_api; + allow 192.168.1.0/24; + deny all; + } + location = /dashboard-dos.html { + root /usr/share/nginx/html; + } + } + } + ``` + +## Using the Dashboard +### Accessing the Dashboard +To view the Dashboard, enter the corresponding address in your browser's address bar.
      +For example, "http://192.168.1.23/dashboard-dos.html" will show the Dashboard page located in /usr/share/nginx/html as indicated in the root directive. + +### DoS Tab Overview +The **DoS** tab displays real-time statistics, settings, and traffic graphs for each Protected Object.
      +In multi-instance environments with an arbitrator, these statistics will be combined. + +{{}} + +| Metric name | Values | Description | +|:--------------- |:------- |:-----------| +| Name | - | The name of the Protected Object, as defined by the `app_protect_dos_name` directive (or auto-generated if not present) | +| Health | [good\|bad] | The health of the backend server, as defined by the `uri` argument of the `app_protect_dos_monitor` directive | +| Under Attack | [yes\|no] | Whether the Protected Object is under attack or not | +| Req/s | - | Number of incoming requests per second | +| Mitigations/s | - | Number of mitigated requests per second | +| Requests | - | Total number of incoming requests | +| Mitigations | - | Total number of mitigated requests | +| Learning | [ready\|ba only\|not ready] | Whether NGINX App Protect DoS collected enough data to protect the Protected Object | +| Protocol | [http1\|http2\|grpc] | As defined by the `protocol` argument of the `app_protect_dos_monitor` directive | +| Mitigation Mode | [standard\|conservative\|none] | As defined by the `mitigation_mode` object in the JSON policy file from the `app_protect_dos_policy_file` directive | +| Signatures | [on\|off] | As defined by the `signatures` object in the JSON policy file from the `app_protect_dos_policy_file` directive. Values - on/off | +| Bad Actors | [on\|off] | As defined by the `bad_actors` object in the JSON policy file from the `app_protect_dos_policy_file` directive | +| Automation Tools Detection | [on\|off] | As defined by the `automation_tools_detection` object in the JSON policy file from the `app_protect_dos_policy_file` directive | +| TLS Fingerprint| [on\|off] | As defined by the `tls_fingerprint` object in the JSON policy file from the `app_protect_dos_policy_file` directive | + +{{}} +
      + +### Graph Representation +The displayed graph is a stacked composition, encompassing two specific metrics: `Passthrough Requests` and `Mitigations`, with both metrics measured on a per-second basis. + +- `Passthrough Requests` represents the count of requests successfully forwarded to the backend server. +- `Mitigations` indicates the quantity of requests that have been mitigated. +
      + The sum of these two metrics at any given moment provides the aggregate count of incoming requests. + +### Status Color Indicators + +- **Green**: Indicates a normal state, with no attack and optimal server health. +- **Yellow**: Indicates the presence of either an active attack or diminished server health. +- **Red**: Indicates a severe scenario, where the object is experiencing an attack combined with poor health. + +### Configuring Dashboard Options +You can configure the dashboard by clicking the Gear button in the Tabs menu.
      +**Update every N sec** - updates the Dashboard data after the specified number of seconds, default is 1 second.
      +
      + +## Using the REST API +Statistics of your server infrastructure can be managed with the REST API interface. The API is based on standard HTTP requests: statistics can be obtained with `GET` requests. + +The requests are sent in the JSON format that allows you to connect the stats to monitoring tools or dashboards that support JSON. + +The status information of any element can be accessed with a slash-separated URL. The URL may look as follows: +`http://192.168.1.23/api/dos/1/protected_objects` + +where: + +- `/api` represents the configured endpoint in your NGINX setup for API access. +- `/dos` differentiates the App Protect DoS API from other NGINX Plus APIs. +- `/1` indicates the API version you are using, with the current version being 1. +- `/protected_objects` points to the specific resource or data you want to access. + +Responses from the API are in JSON format for easy parsing and analysis. + +To view all primary endpoints, use a GET request via 'curl' in the terminal: + +```shell +curl -s 'http://192.168.1.23/api/dos/1/' | json_pp +``` + +This command returns a JSON array listing the main access points, like: + +```json +["protected_objects"] +``` + +For obtaining detailed statistics about a particular endpoint, execute a similar GET request: + +```shell +curl -s 'http://192.168.1.23/api/dos/1/protected_objects' | json_pp +``` + +### APIs overview + +#### Endpoints +{{}} + +| Path | Description | +|:------ |:-----------| +| / | Return list of root endpoints | +| /protected_objects/ | Return statistics of all Protected Objects | +| /protected_objects/{protectedObjectName} | Return statistics of a Protected Object | + +{{}} + +An example response from the `/protected_objects` endpoint could be: + +```json +{ + "po_1": { + "attack": false, + "health": 0.50, + "rps": 12, + "mps": 0, + "requests": 500123, + "passthrough": 260023, + "mitigations": 240100, + "learning": "ready", + "protocol": "http1", + "mode": "standard", + "sig": true, + "ba": true, + 
"auto_tools": true, + "tls_fp": true + }, + "po_http2": { + "attack": false, + "health": 0.50, + "rps": 20, + "mps": 0, + "requests": 500123, + "passthrough": 260023, + "mitigations": 240100, + "learning": "not_ready", + "protocol": "http2", + "mode": "conservative", + "sig": true, + "ba": true, + "auto_tools": true, + "tls_fp": true + }, + "po_grpc": { + "attack": false, + "health": 0.50, + "rps": 25, + "mps": 0, + "requests": 6000123, + "passthrough": 2599123, + "mitigations": 3401000, + "learning": "ready", + "protocol": "grpc", + "mode": "standard", + "sig": true, + "ba": true, + "auto_tools": true, + "tls_fp": true + } +} +``` diff --git a/content/nap-dos/monitoring/operation-log.md b/content/nap-dos/monitoring/operation-log.md new file mode 100644 index 000000000..dbfae5bc9 --- /dev/null +++ b/content/nap-dos/monitoring/operation-log.md @@ -0,0 +1,133 @@ +--- +description: Learn about the F5 NGINX App Protect DoS Operation Log. +docs: DOCS-669 +doctypes: +- task +title: NGINX App Protect DoS Operation Log +toc: true +weight: 180 +--- + +## Overview + +The operation logs consists of system operational and health events. The events are sent to the NGINX error log and are distinguished by the `APP_PROTECT_DOS` prefix followed by JSON body. The log level depends on the event: success is usually `notice` while failure is `error`. The timestamp is inherent in the error log. + +## Events + +{{}} + +|Event Type|Level|Meaning| +|--------- |-----|------ | +|Configuration Error |error |There were errors in the [directives]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md" >}}) in the `nginx.conf` file.
Configuration error event is produced when one of F5 NGINX App Protect DoS directives is supplied with incorrect data. Additional information will be added to the message, describing what was incorrect. NGINX will run with default values for this directive.
Please note that if the directive is supplied with an incorrect number of arguments, then NGINX will issue an error and will not run. This is generic NGINX behavior.|
      }} + +```json +{ + "event": "configuration_error", + "error_message": "unknown argument", + "line_number": 58 +} +``` + +{{}} + +|Event Type|Level|Meaning| +|--------- |-----|------ | +|Configuration Load Failure |error |There was an error in one of the configuration files: file not found, failed to parse.| + +{{}} + +```json +{ + "event": "configuration_load_failure", + "error_message": "Failed to load Policy '/etc/app_protect_dos/BADOSDefaultPolicy.json' : Fail parse JSON Policy: malformed JSON string, neither tag, array, object, number, string or atom, at character offset 0 (before \"xxxx\\nhdjk\\n\\n555\\n\") \n.\n", + "error_line_number": 58 +} +``` + +{{}} + +|Event Type|Level|Meaning| +|--------- |-----|------ | +|Configuration Load Success | notice |The `APP_PROTECT_DOS` configuration process ended successfully: all policies, log configuration and global settings were loaded to NGINX App Protect DoS and all traffic will be handled by this configuration. The `error_message` contains warnings. This event is also generated on the initial configuration (when NGINX Plus starts).| + +{{}} + +```json +{ + "event": "configuration_load_success", + "software_version": "x.x.x.x.x", + "error_message": "warning if exists..." +} +``` + +{{}} + +|Event Type|Level|Meaning| +|--------- |-----|------ | +|Shared Memory Failure |error |A worker attempted to connect to shared memory but the operation failed. One time error (per worker) - upon switch from `init` or `operational` mode to `failure`. The `mode` should be `failure`.| + +{{}} + + +```json +{ + "event": "shared_memory_failure", + "worker_pid": 4928, + "mode": "failure", + "mode_changed": true +} +``` + +{{}} + +|Event Type|Level|Meaning| +|--------- |-----|------ | +|Shared Memory Connected |notice |A worker successfully connected to shared memory.
      One time message (per worker) - upon switch from init or failure mode to `operational`.
      The `mode` attribute should be `operational`, unless there is an ongoing problem.| + +{{
      }} + +```json +{ + "event": "shared_memory_connected", + "worker_pid": 4928, + "mode": "operational", + "mode_changed": true +} +``` + +{{}} + +|Event Type|Level|Meaning| +|--------- |-----|------ | +| eBPF Failure | error | A worker attempted to connect to eBPF maps but the operation failed. One time error (per worker) - upon switch from `init` or `operational` mode to `failure`. The mode should be `failure`.| + +{{}} + + +```json +{ + "event": "ebpf_failure", + "worker_pid": 4928, + "mode": "failure", + "mode_changed": true +} +``` + +{{}} + +|Event Type|Level|Meaning| +|--------- |-----|------ | +| eBPF Connected | notice | A worker successfully connected to eBPF maps.
      One time message (per worker) - upon switch from `init` or `failure` mode to `operational`.
      The `mode` attribute should be `operational`, unless there is an ongoing problem. | + +{{
      }} + +```json +{ + "event": "ebpf_connected", + "worker_pid": 4928, + "mode": "operational", + "mode_changed": true +} +``` diff --git a/content/nap-dos/monitoring/security-log.md b/content/nap-dos/monitoring/security-log.md new file mode 100644 index 000000000..ca40a57f2 --- /dev/null +++ b/content/nap-dos/monitoring/security-log.md @@ -0,0 +1,435 @@ +--- +description: Learn about the F5 NGINX App Protect DoS Security Log. +docs: DOCS-670 +doctypes: +- task +title: NGINX App Protect DoS Security Log +toc: true +weight: 140 +--- + +## Overview + +Security logs contain information about the status of the protected objects. It gives a general picture about each protected object in terms of traffic intensity, health of the backend server, learning and mitigations. + +There are several types of logs, each contains different information and published either periodically or upon an important event. + +### Dictionary + +The following table lists all the possible fields in the logs and their meaning. 
+ +{{}} + +| Field | Type | Meaning | +|----------------|--------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `date_time` | string | the date and time of the event | +| `product` | string | always set to `app-protect-dos` | +| `product_version` | string | F5 NGINX App Protect DoS version | +| `unit_hostname` | string | host name of the app-protect-dos instance | +| `instance_id` | string | instance ID: container id from `/proc/self/cgroup`or hostname if container is is not available | +| `vs_name` | string | A unique identifier (representing the protected object's name) of the location in the `nginx.conf` file that this request is associated with. It contains the line number of the containing server block in `nginx.conf`, the server name, a numeric discriminator that distinguishes between multiple entries within the same server, and the location name.
For example: `34-mydomain.com:0-~/.*php(2)`. | +| `dos_attack_id`| integer | unique attack ID per unit_hostname | +| `attack_event` | string | Event name as it appears in remote logger. | +| `stress_level` | float | a number from 0 to ... that reflects stress level. | +| `learning_confidence` | string | the possible values are **not ready/bad actors only/ready** | +| `baseline_dps` | integer | learned datagrams per second (DPS) | +| `incoming_dps` | integer | current datagrams per second (DPS) | +| `incoming_rps` | integer | current RPS (requests per second) | +| `successful_tps` | integer | successful TPS (successful requests per second - Any RC but 5xx) | +| `allowlist_rps` | integer | allowlist requests per second | +| `unsuccessful_rps` | integer | unsuccessful requests per second (passed to server and not responded: `reset / timeout / 5xx`) | +| `incoming_datagrams` | integer | incremental number of incoming datagrams | +| `incoming_requests` | integer | incremental number of incoming requests | +| `allowlist_requests` | integer | incremental number of allowlist requests | +| `successful_responses` | integer | incremental number of successful responses | +| `unsuccessful_requests` | integer | incremental number of unsuccessful requests (passed to server and not responded: `reset / timeout / 5xx`) | +| `active_connections`| integer | current number of active server connections | +| `threshold_dps` | float | global rate DPS threshold | +| `threshold_conns` | float | active connections threshold | +| `mitigated_bad_actors`
      `redirect_bad_actor`
      `challenge_bad_actor`
      `block_bad_actor` | integer | incremental number of mitigated bad actors. Increments upon any type of bad actors mitigations.
      incremental number of http redirections sent to detected bad actors
      incremental number of JS challenges sent to detected bad actors
      incremental number of blocked bad actors | +| `mitigated_by_signatures`
      `redirect_signature`
      `challenge_signature`
      `block_signature` | integer | incremental number of requests mitigated by signatures. Increments upon any type of signatures mitigations.
      incremental number of http redirections sent to clients when requests match a signature.
      incremental number of JS challenges sent to clients when requests match a signature.
      incremental number of blocked requests when requests match a signature. | +| `mitigated_by_global_rate`
      `redirect_global`
      `challenge_global`
      `block_global` | integer | incremental number of requests mitigated by global_rate. Increments upon any type of global rate mitigations.
      incremental number of http redirections sent to clients upon global rate mitigation.
      incremental number of JS challenges sent to clients upon global rate mitigation.
      incremental number of blocked requests upon global rate mitigation. | +| `mitigated_slow`
      `redirect_slow`
      `challenge_slow`
      `block_slow`| integer | incremental number of mitigated slow requests. Increments upon any type of slow requests mitigations.
      incremental number of http redirections sent to clients upon slow request mitigation.
      incremental number of JS challenges sent to clients upon slow request mitigation.
      incremental number of blocked slow requests. | +| `mitigated_connections` | integer | incremental number of mitigated by connections mitigation | +| `mitigated_bad_actors_l4` | integer | incremental number of mitigated by L4 accelerated mitigation | +| `mitigated_bad_actors_rps`
      `redirect_bad_actor_rps`
      `challenge_bad_actor_rps`
      `block_bad_actor_rps` | integer | mitigated_bad_actors rps. Includes any type of bad actors mitigations.
      http redirections per second sent to detected bad actors.
      JS challenges per second sent to detected bad actors.
      blocked bad actors per second. | +| `mitigated_by_signatures_rps`
      `redirect_signature_rps`
      `challenge_signature_rps`
      `block_signature_rps` | integer | mitigated_signatures rps. Includes any type of signatures mitigations.
      http redirections sent per second to clients when requests match a signature.
      JS challenges per second sent to clients when requests match a signature.
      blocked requests per second when requests match a signature. | +| `mitigated_slow_rps`
      `redirect_slow_rps`
      `challenge_slow_rps`
      `block_slow_rps` | integer | mitigated slow requests per second. Includes any type of slow requests mitigations.
      http redirections per second sent to clients upon slow request mitigation.
      JS challenges per second sent to clients upon slow request mitigation.
      blocked slow requests per second. | +| `mitigated_by_global_rate_rps`
      `redirect_global_rps`
      `challenge_global_rps`
      `block_global_rps` | integer | mitigated_global_rate rps. Includes any type of global rate mitigations.
      http redirections per second sent to clients upon global rate mitigation.
      JS challenges per second sent to clients upon global rate mitigation.
      blocked requests per second upon global rate mitigation. | +| `mitigated_bad_actors_l4_rps` | integer | blocked requests per second when mitigated by L4 accelerated mitigation | +| `mitigated_connections_rps` | integer | mitigated_connections rps | +| `source_ip`
      `tls_fp`
      `impact_rps` | string
      string
      integer | ip address of the detected bad actor `1.1.1.1`
      TLS Fingerprint of the bad actor
      RPS created by bad actor in the time of the detection **(to be calculated as a max hitcount in AMT / 10)** | +| `new_bad_actors_detected`
      `bad_actors` | integer | the number of newly detected bad actors
      the number of bad actors | +| `signature`
      `signature_id`
      `signature_efficiency`
      `signature_accuracy`| string
      integer
      float
      float | signature string `http.request.method eq GET and http.uri_parameters eq 6`
      unique signature ID per unit_host
      estimated efficiency upon signature detection: percentage of bad traffic covered by the signature
      estimated accuracy upon signature detection: percentage of learned good traffic NOT covered by the signature | + +{{
      }} + +## Events + +### 1a. Attack notification + +Reports about the start and end of an attack, as well as major parameters of ongoing attacks. + +a. Example: **Attack Started** + +```shell +date_time="Oct 05 2021 08:01:00", +product="app-protect-dos", +product_version="25+1.78.0-1.el7.ngx", +unit_hostname="localhost.localdomain", +instance_id="129c76", +vs_name="example.com/", +dos_attack_id="1", +attack_event="Attack started", +stress_level="1.00", +learning_confidence="Ready", +baseline_dps="17", +incoming_dps="181", +incoming_rps="181", +successful_tps="0", +allowlist_rps="0", +unsuccessful_rps="0", +incoming_datagrams="8576", +incoming_requests="8576", +allowlist_requests="162", +successful_responses="5265", +unsuccessful_requests="0", +active_connections="58", +threshold_dps="41.60", +threshold_conns="41.60", +mitigated_bad_actors="0", +mitigated_by_signatures="0", +mitigated_by_global_rate="0", +mitigated_bad_actors_l4="0", +mitigated_slow="0", +redirect_global="0", +redirect_bad_actor="0", +redirect_signature="0", +redirect_slow="0", +challenge_global="0", +challenge_bad_actor="0", +challenge_signature="0", +challenge_slow="0", +block_global="0", +block_bad_actor="0", +block_signature="0", +block_slow="0", +mitigated_connections="0", +mitigated_bad_actors_rps="0", +mitigated_by_signatures_rps="0", +mitigated_by_global_rate_rps="0", +mitigated_bad_actors_l4_rps="0", +mitigated_slow_rps="0", +redirect_global_rps="0", +redirect_bad_actor_rps="0", +redirect_signature_rps="0", +redirect_slow_rps="0", +challenge_global_rps="0", +challenge_bad_actor_rps="0", +challenge_signature_rps="0", +challenge_slow_rps="0", +block_global_rps="0", +block_bad_actor_rps="0", +block_signature_rps="0", +block_slow_rps="0", +mitigated_connections_rps="0", +``` + +b. 
Example: **Attack Ended** + +```shell +date_time="Oct 05 2021 08:06:21", +product="app-protect-dos", +product_version="25+1.78.0-1.el7.ngx", +unit_hostname="localhost.localdomain", +instance_id="129c76", +vs_name="example.com/", +dos_attack_id="1", +attack_event="Attack ended", +stress_level="0.50", +learning_confidence="Ready", +baseline_dps="12", +incoming_dps="0", +incoming_rps="0", +successful_tps="0", +allowlist_rps="0", +unsuccessful_rps="0", +incoming_datagrams="226566", +incoming_requests="226566", +allowlist_requests="1632", +successful_responses="7760", +unsuccessful_requests="0", +active_connections="0", +threshold_dps="2121.60", +threshold_conns="2121.60", +mitigated_bad_actors="94488", +mitigated_by_signatures="117361", +mitigated_by_global_rate="2861", +mitigated_bad_actors_l4="62788", +mitigated_slow="0", +redirect_global="2861", +redirect_bad_actor="94488", +redirect_signature="117361", +redirect_slow="0", +challenge_global="0", +challenge_bad_actor="0", +challenge_signature="0", +challenge_slow="0", +block_global="0", +block_bad_actor="0", +block_signature="0", +block_slow="0", +mitigated_connections="0", +mitigated_bad_actors_rps="0", +mitigated_by_signatures_rps="0", +mitigated_by_global_rate_rps="0", +mitigated_bad_actors_l4_rps="0", +mitigated_slow_rps="0", +redirect_global_rps="0", +redirect_bad_actor_rps="0", +redirect_signature_rps="0", +redirect_slow_rps="0", +challenge_global_rps="0", +challenge_bad_actor_rps="0", +challenge_signature_rps="0", +challenge_slow_rps="0", +block_global_rps="0", +block_bad_actor_rps="0", +block_signature_rps="0", +block_slow_rps="0", +mitigated_connections_rps="0", +``` + +### 1b. Traffic/Mitigation summary stats + +Reported periodically, providing aggregated statistics per protected object.
      +This corresponds to the metrics reported on the main Grafana screen. + +a. Example: **No Attack** + +```shell +date_time="Oct 05 2021 07:54:29", +product="app-protect-dos", +product_version="25+1.78.0-1.el7.ngx", +unit_hostname="localhost.localdomain", +instance_id="129c76", +vs_name="example.com/", +dos_attack_id="0", +attack_event="No Attack", +stress_level="0.50", +learning_confidence="Not ready", +baseline_dps="19", +incoming_dps="9", +incoming_rps="9", +successful_tps="10", +allowlist_rps="1", +unsuccessful_rps="0", +incoming_datagrams="678", +incoming_requests="678", +allowlist_requests="52", +successful_responses="678", +unsuccessful_requests="0", +active_connections="0", +threshold_dps="2121.60", +threshold_conns="2121.60", +mitigated_bad_actors="0", +mitigated_by_signatures="0", +mitigated_by_global_rate="0", +mitigated_bad_actors_l4="0", +mitigated_slow="0", +redirect_global="0", +redirect_bad_actor="0", +redirect_signature="0", +redirect_slow="0", +challenge_global="0", +challenge_bad_actor="0", +challenge_signature="0", +challenge_slow="0", +block_global="0", +block_bad_actor="0", +block_signature="0", +block_slow="0", +mitigated_connections="0", +mitigated_bad_actors_rps="0", +mitigated_by_signatures_rps="0", +mitigated_by_global_rate_rps="0", +mitigated_bad_actors_l4_rps="0", +mitigated_slow_rps="0", +redirect_global_rps="0", +redirect_bad_actor_rps="0", +redirect_signature_rps="0", +redirect_slow_rps="0", +challenge_global_rps="0", +challenge_bad_actor_rps="0", +challenge_signature_rps="0", +challenge_slow_rps="0", +block_global_rps="0", +block_bad_actor_rps="0", +block_signature_rps="0", +block_slow_rps="0", +mitigated_connections_rps="0", +``` + +b. 
Example: **Under Attack** + +```shell +date_time="Oct 05 2021 08:02:35", +product="app-protect-dos", +product_version="25+1.78.0-1.el7.ngx", +unit_hostname="localhost.localdomain", +instance_id="129c76", +vs_name="example.com/", +dos_attack_id="1", +attack_event="Under Attack", +stress_level="0.50", +learning_confidence="Ready", +baseline_dps="12", +incoming_dps="893", +incoming_rps="893", +successful_tps="12", +allowlist_rps="1", +unsuccessful_rps="0", +incoming_datagrams="87823", +incoming_requests="87823", +allowlist_requests="1523", +successful_responses="5736", +unsuccessful_requests="0", +active_connections="1", +threshold_dps="92.40", +threshold_conns="92.40", +mitigated_bad_actors="0", +mitigated_by_signatures="75137", +mitigated_by_global_rate="2861", +mitigated_bad_actors_l4="62788", +mitigated_slow="0", +redirect_global="2861", +redirect_bad_actor="0", +redirect_signature="75137", +redirect_slow="0", +challenge_global="0", +challenge_bad_actor="0", +challenge_signature="0", +challenge_slow="0", +block_global="0", +block_bad_actor="0", +block_signature="0", +block_slow="0", +mitigated_connections="0", +mitigated_bad_actors_rps="0", +mitigated_by_signatures_rps="879", +mitigated_by_global_rate_rps="0", +mitigated_bad_actors_l4_rps="0", +mitigated_slow_rps="0", +redirect_global_rps="0", +redirect_bad_actor_rps="0", +redirect_signature_rps="879", +redirect_slow_rps="0", +challenge_global_rps="0", +challenge_bad_actor_rps="0", +challenge_signature_rps="0", +challenge_slow_rps="0", +block_global_rps="0", +block_bad_actor_rps="0", +block_signature_rps="0", +block_slow_rps="0", +mitigated_connections_rps="0", +``` + +### 2. Bad actor detection/expiration +Reports NGINX App Protect DoS decisions regarding bad actors. + +a. 
Example: **Bad Actor Detection** + +```shell +date_time="Apr 29 2021 14:03:01", +product="app-protect-dos", +product_version="23+1.54.1-1.el7.ngx", +unit_hostname="localhost.localdomain", +instance_id="d9a6d8", +vs_name="example.com/", +dos_attack_id="1", +attack_event="Bad actor detection", +source_ip="5.5.5.9", +impact_rps="30", +``` + +b. Example: **Bad Actor Expired** + +```shell +date_time="Apr 29 2021 14:05:29", +product="app-protect-dos", +product_version="23+1.54.1-1.el7.ngx", +unit_hostname="localhost.localdomain", +instance_id="d9a6d8", +vs_name="example.com/", +dos_attack_id="0", +attack_event="Bad actor expired", +source_ip="5.5.5.10", +impact_rps="12", +``` + +### 3. Attack signatures +Reports NGINX App Protect DoS decisions regarding signatures.
      + +Example: **Attack Signature Detected** + +```shell +date_time="Apr 29 2021 14:02:56", +product="app-protect-dos", +product_version="23+1.54.1-1.el7.ngx", +unit_hostname="localhost.localdomain", +instance_id="d9a6d8", +vs_name="example.com/", +dos_attack_id="1", +attack_event="Attack signature detected", +signature="(http.user_agent_header_exists eq true) and (http.accept contains other-than(application|audio|message|text|image|multipart)) and (http.unknown_header_exists eq true) and (http.headers_count neq 10) and (http.x_forwarded_for_header_exists eq false) and (http.uri_parameters eq 1) and (http.uri_len between 48-63) and (http.accept_header_exists eq true) and (http.hdrorder not-hashes-to 55) and (http.connection_header_exists eq true) and (http.accept_encoding_header_exists eq true) and (http.request.method eq reserved) and (http.cookie_header_exists eq true) and (http.uri_file hashes-to 7) and (http.host_header_exists eq true)", +signature_id="809655398", +signature_efficiency="72.00", +signature_accuracy="100.00", +``` + +### 4. Bad actors detection information +Provides detailed information about bad actors.
      + +Example: **Bad Actors Detected** + +```shell +date_time="Apr 29 2021 14:02:00", +product="app-protect-dos", +product_version="23+1.54.1-1.el7.ngx", +unit_hostname="localhost.localdomain", +instance_id="d9a6d8", +vs_name="example.com/", +dos_attack_id="1", +attack_event="Bad actors detected", +new_bad_actors_detected="2", +bad_actors="2", +``` + +## Security Log Configuration File +The file is in JSON format. +
      +### Filter
      + +{{}} + +| Element | Description | Type/Values| Default | +|----------|--------------| ---------- | -----------| +|traffic-mitigation-stats| This filter element refers to [Traffic/Mitigation summary stats](#1b-trafficmitigation-summary-stats).| **Enumerated values:**
      - **all**
      - **none**| `all` | +|bad-actors| This filter element refers to [Bad actor detection/expiration](#2-bad-actor-detectionexpiration), every 10 seconds.| **Enumerated values:**
      - **all**
      - **none**
      - **top N**| `top 10` | +|attack-signatures| This filter element refers to [Attack Signatures](#3-attack-signatures), every 10 seconds.| **Enumerated values:**
      - **all**
      - **none**
      - **top N**| `top 10` | + +{{
      }} + + +Example: + +```json +{ + "filter": { + "traffic-mitigation-stats": "all", + "bad-actors": "top 100", + "attack-signatures": "top 100" + } +} +``` diff --git a/content/nap-dos/monitoring/types-of-logs.md b/content/nap-dos/monitoring/types-of-logs.md new file mode 100644 index 000000000..dbe488cbf --- /dev/null +++ b/content/nap-dos/monitoring/types-of-logs.md @@ -0,0 +1,78 @@ +--- +description: Learn about the F5 NGINX App Protect DoS Logs Overview. +docs: DOCS-671 +doctypes: +- task +title: NGINX App Protect DoS Logs Overview +toc: true +weight: 130 +--- + +There are 4 types of logs corresponding to App Protect DoS: + +- [Security Log](#security-log): The general picture of the site and how App Protect DoS processed it, including anomalies and signatures found. +- [Operation Log](#operation-log): Events such as configuration errors or warnings. +- [Debug Logs](#debug-log): Technical messages at different levels of severity used to debug and resolve incidents and error behaviors. +- [Request Logging](#request-log): F5 NGINX App Protect DoS adds information to each request logged to NGINX's access logging mechanism. + +{{% note %}} +NGINX does not have audit logs in the sense of *"**who** did **what**"*. This can be done either from the orchestration system controlling NGINX (such as NGINX Controller) or by tracking the configuration files and the systemd invocations using Linux tools. +{{% /note %}} + + {{}} + +|Type|Log Configuration| Configuration Contexts| File Destination| Syslog Destination | +|----|-----------------|-----------------------|-----------------|--------------------| +| Debug | Log file name is the redirection in the invocation of the `admd` command line in the start script | Global (not part of `nginx.conf`)|Yes. Log file is in /var/log/adm/admd.log directory. 
There is currently no file rotation capability available for this log.| No | +| Operation | `error_log` directive, part of core NGINX | `nginx.conf` - global | Yes, NGINX error log | Yes, NGINX error log | +|Request |NGINX has two directives for the access log:
      - **access_log** - to turn [on\|off]
      - **log_format** - to specify the required information regarding each request

NGINX App Protect DoS has several variables that can be added to the `log_format` directive, such as `$app_protect_dos_outcome`.&#13;

      For more information refer to [NGINX App Protect DoS Access Log]({{< relref "/nap-dos/monitoring/access-log.md" >}}) | `nginx.conf` - global| Yes, NGINX access log | Yes, NGINX access log | +| Security | NGINX App Protect DoS has two directives in `nginx.conf`:
      - app_protect_dos_security_log_enable to turn logging [on\|off]
- app_protect_dos_security_log to set its logging configuration and destination&#13;

For more information refer to:&#13;
      - **Configuration**: [App Protect DoS - Directives and Policy]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md">}})
      - **Usage**: [NGINX App Protect DoS - Security Log]({{< relref "/nap-dos/monitoring/security-log.md" >}}) | `nginx.conf`: http, server, location | Yes, either stderr, or an absolute path to a local file are supported | Yes | + + {{
      }} + +## Security Log + The security logs contain information about the status of the protected objects. It gives a general picture about each protected object in terms of traffic intensity, health of the backend server, learning and mitigations. For more information refer to [NGINX App Protect DoS Security Log]({{< relref "/nap-dos/monitoring/security-log.md" >}}) documentation. + +## Operation Log + The operation logs consists of system operational and health events. The events are sent to the NGINX error log and are distinguished by the `APP_PROTECT_DOS` prefix followed by JSON body. The log level depends on the event: success is usually indicated by `notice`, while failure is indicated by `error`. The timestamp is inherent in the error log. For more information refer to [App Protect DoS Operation Log]({{< relref "/nap-dos/monitoring/operation-log.md" >}}) documentation. + +## Request Log + Access log is NGINX’s request log mechanism. It is controlled by two directives. + +### log_format + This directive determines the format of the log messages using predefined variables. App Protect DoS will enrich this set of variables with several security log attributes that are available to be included in the `log_format`. If `log_format` is not specified then the built-in format `combined` is used but, because that format does not include the extended App Protect DoS variables, this directive must be used when the user wants to add App Protect DoS information to the log. + +### access_log +This directive determines the destination of the `access_log` and the name of the format. The default is the file `/var/log/nginx/access.log` using the combined format. In order to use the custom format that includes the NGINX App Protect DoS variables, use this directive with the name of the desired format. + +### App Protect DoS Variables +These are the variables added to Access Log. They are a subset of the Security log attributes. 
The Access Log variable names are the Security log attribute names prefixed with `$app_protect_dos`.&#13;
      For more information refer to [NGINX App Protect DoS Access Log]({{< relref "/nap-dos/monitoring/access-log.md" >}}) + +## Debug Log - NGINX App Protect DoS +The NGINX App Protect DoS Debug log is used to troubleshoot the functionality of the product.
      + +The path of the log is at a fixed location: `/var/log/adm/admd.log`. + +There are several log levels - `error`, `warning`, `info` and `debug`. The default is `info`. + +In order to change the log level at run time, the following command can be called: + +```shell +admd -l DEBUG_LEVEL +``` + +{{% note %}} +`nginx.conf` does not refer to the NGINX App Protect DoS debug log configuration neither directly nor indirectly. +{{% /note %}} + +## NGINX Error log + +The NGINX Error log is used to troubleshoot the configuration portion of NGINX App Protect DoS. + +The file is called `error.log` and its path and debug level is determined in `nginx.conf` by the directive `error_log`.
      + +For example: + +```shell +error_log /var/log/nginx/error.log debug; +``` diff --git a/content/nap-dos/releases/_index.md b/content/nap-dos/releases/_index.md new file mode 100644 index 000000000..33ea5b259 --- /dev/null +++ b/content/nap-dos/releases/_index.md @@ -0,0 +1,9 @@ +--- +description: Stay up-to-date with the latest F5 NGINX App Protect DoS release. +menu: + docs: + parent: NGINX App Protect DoS Documentation. +title: Releases +weight: 220 +url: /nginx-app-protect-dos/releases/ +--- diff --git a/content/nap-dos/releases/about-1.0.md b/content/nap-dos/releases/about-1.0.md new file mode 100644 index 000000000..5a00a47b0 --- /dev/null +++ b/content/nap-dos/releases/about-1.0.md @@ -0,0 +1,76 @@ +--- +title: NGINX App Protect 1.0 +toc: true +weight: 260 +docs: DOCS-672 +--- + +Here you can find the release information for F5 NGINX App Protect DoS v1. NGINX App Protect DoS provides behavioral protection against Denial of Service (DoS) for your web applications. + +## Release 1.0 + +July 6, 2021 + +### New Features + +#### Protection against application layer Denial of Service attacks + +- GET and POST flood
      +- Slowloris, Slowread, Slowpost
      +- Distributed variations of attacks (see above)
      +- Challenge Collapsar (CC) attack/random URIs
      + +#### False-positive management mechanisms + +- HTTP Redirection +- Client-side validation +- TLS fingerprinting + +#### Use Cases + +#### Application Types + +- Traditional HTML-based web applications +- XML-based web services +- REST APIs (JSON) + +#### Deployment Options + +- Kubernetes Per-pod proxy +- Kubernetes Per-service proxy +- API Gateway +- Traditional edge proxy + +### Supported Packages + +#### App Protect DoS + +##### CentOS 7.4+ + +- app-protect-dos-24+1.69.6-1.el7.ngx.el7.ngx.x86_64.rpm + +##### Debian 10 + +- app-protect-dos_24+1.69.6-1~buster_amd64.deb + +##### Ubuntu 18.04 + +- app-protect-dos_24+1.69.6-1~bionic_amd64.deb + +##### Ubuntu 20.04 + +- app-protect-dos_24+1.69.6-1~focal_amd64.deb + +#### NGINX Plus + +- NGINX Plus R24 + +### Known Issues + +- `proxy_request_buffering off` is not supported. + +- NGINX App Protect DoS does not protect `grpc` and `http2` services. The traffic is bypassed. + +- [TLS fingerprint]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md#policy-directive-app_protect_dos_policy_file" >}}) feature is not used in CentOS 7.4 due to the old OpenSSL version. The required OpenSSL version is 1.1.1 or higher. + +- Slow POST attack always mitigates with block action while other types of attacks can also be mitigated with redirection or JS challenges. diff --git a/content/nap-dos/releases/about-1.1.0.md b/content/nap-dos/releases/about-1.1.0.md new file mode 100644 index 000000000..69a60952d --- /dev/null +++ b/content/nap-dos/releases/about-1.1.0.md @@ -0,0 +1,47 @@ +--- +title: NGINX App Protect DoS Arbitrator 1.1.0 +toc: true +weight: 220 +docs: DOCS-673 +--- + +Here you can find the release information for F5 NGINX App Protect DoS Arbitrator v1.1.0. + +## Arbitrator Service Release 1.1.0 + +December 1, 2021 + +This release is focused on security and stability. + +### New Features + +- Improve security by enabling the arbitrator to work as a non-root user. 
+ +- Remove operating system dependencies to work as a native service utilizing golang. + +### Resolved Issues + +- Special characters like a slash inside the protected object name prevented Arbitrator to save the state file. + +### Important Notes + +- The current release upgrades Arbitrator service only. This change is agnostic to NGINX App Protect DoS functionalities. + +- `proxy_request_buffering` off is not supported. + +- gRPC and HTTP/2 protection require active monitoring of the protected service. The directive `app_protect_dos_monitor` is mandatory for these use cases, otherwise, the attack will not be detected. + +- gRPC and HTTP/2 protection are available only on Debian 10, Ubuntu 18.04, and Ubuntu 20.04 platforms. For the rest of the platforms, NGINX App Protect DoS does not protect gRPC and HTTP/2 services. The traffic is bypassed. + +- [TLS fingerprint]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md#policy-directive-app_protect_dos_policy_file" >}}) feature is not used in CentOS 7.4 due to the old OpenSSL version. The required OpenSSL version is 1.1.1 or higher. + +- Slow POST attack always mitigates with block action while other types of attacks can also be mitigated with redirection or JS challenges. + +- New optional configuration parameters of the directive `app_protect_dos_monitor` to support gRPC and HTTP/2 protocols. +- Added new fields in Security Log: + + - `baseline_dps` (datagrams per second) instead of `baseline_tps`, `incoming_datagrams`
      + - `successful_responses` instead of `successful_transactions`
      + - `unsuccessful_requests` instead of `unsuccessful_requests_count`. + +- In the case of an upgrade from the previous `app-protect-dos` version, it's necessary to remove the old `nginx-plus` and install the new `app-protect-dos` that will install a correspondent version of `nginx-plus` as described in the [NGINX App Protect DoS Deployment Guide]({{< relref "/nap-dos/deployment-guide/learn-about-deployment.md" >}}). diff --git a/content/nap-dos/releases/about-1.1.1.md b/content/nap-dos/releases/about-1.1.1.md new file mode 100644 index 000000000..c9f90779e --- /dev/null +++ b/content/nap-dos/releases/about-1.1.1.md @@ -0,0 +1,19 @@ +--- +title: NGINX App Protect DoS Arbitrator 1.1.1 +toc: true +weight: 115 +docs: DOCS-1205 +--- + +Here you can find the release information for F5 NGINX App Protect DoS Arbitrator v1.1.1. + +## Arbitrator Service Release 1.1.1 + +May 2, 2023 + +This release is focused on security. New image is backward compatible with the previous version. + +### Resolved Issues + +- Removed unused REDIS module + diff --git a/content/nap-dos/releases/about-2.0.md b/content/nap-dos/releases/about-2.0.md new file mode 100644 index 000000000..8425c1b61 --- /dev/null +++ b/content/nap-dos/releases/about-2.0.md @@ -0,0 +1,88 @@ +--- +title: NGINX App Protect DoS 2.0 +toc: true +weight: 220 +docs: DOCS-674 +--- + +Here you can find the release information for F5 NGINX App Protect DoS v2.0. NGINX App Protect DoS provides behavioral protection against Denial of Service (DoS) for your web applications. + +## Release 2.0 + +October 20, 2021 + +### New Features + +#### [Protection of gRPC services against application layer Denial of Service attacks]({{< relref "/nap-dos/deployment-guide/learn-about-deployment.md" >}}) + +- **Message flood** + Attacker supplies multiple gRPC requests that exceed the service capacity. 
+- **Concurrent large requests** + Attacker supplies a number of concurrent large requests that exceed the server capacity of concurrent requests. +- **Slow gRPC POST** + Attacker supplies a number of concurrent slow POST gRPC requests that exceed the server capacity of concurrent requests. +- **HTTP/2 attack on gRPC service** + Attacker runs typical DoS HTTP/2 attacks: HTTP flood and slow attacks on gRPC service. + +#### [Protection of HTTP/2 services against application layer Denial of Service attacks]({{< relref "/nap-dos/deployment-guide/learn-about-deployment.md" >}}) + +### Supported Packages + +#### App Protect DoS + +##### CentOS 7.4+ / UBI7 + +- app-protect-dos-25+2.0.1-1.el7.ngx.el7.ngx.x86_64.rpm + +##### Debian 10 + +- app-protect-dos_25+2.0.1-1~buster_amd64.deb + +##### Ubuntu 18.04 + +- app-protect-dos_25+2.0.1-1~bionic_amd64.deb + +##### Ubuntu 20.04 + +- app-protect-dos_25+2.0.1-1~focal_amd64.deb + +#### NGINX Plus + +- NGINX Plus R25 + +### Resolved Issues + +- Security log keeps working on removed Protected Objects. + +- Monitoring requests show up in the access log. + +- `app_protect_dos_name` longer than 32 characters creates a garbage name in the logs. + +- Created protected objects for the not configured contexts. + +- Wrong reporting of attack status with arbitrator. + +- Wrong `impact_rps` value in **Bad actor expired** log message. + +- Rate limit in Access Log should be optional. + +### Important Notes + +- `proxy_request_buffering` off is not supported. + +- gRPC and HTTP/2 protection require active monitoring of the protected service. The directive `app_protect_dos_monitor` is mandatory for these use cases, otherwise, the attack will not be detected. + +- gRPC and HTTP/2 protection are available only on Debian 10, Ubuntu 18.04 and Ubuntu 20.04 platforms. For the rest of the platforms, NGINX App Protect DoS does not protect gRPC and HTTP/2 services. The traffic is bypassed. 
+ +- [TLS fingerprint]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md#policy-directive-app_protect_dos_policy_file" >}}) feature is not used in CentOS 7.4 due to the old OpenSSL version. The required OpenSSL version is 1.1.1 or higher. + +- Slow POST attack always mitigates with block action while other types of attacks can also be mitigated with redirection or JS challenges. + +- New optional configuration parameters of the directive `app_protect_dos_monitor` to support gRPC and HTTP/2 protocols. +- Added new fields in Security Log: + + - `baseline_dps` (datagrams per second) instead of `baseline_tps`, `incoming_datagrams`
      + - `successful_responses` instead of `successful_transactions`
      + - `unsuccessful_requests` instead of `unsuccessful_requests_count`. + +- In the case of an upgrade from the previous `app-protect-dos` version, it's necessary to remove the old `nginx-plus` and install the new `app-protect-dos` that will install a correspondent version of `nginx-plus` as described in the [NGINX App Protect DoS Deployment Guide]({{< relref "/nap-dos/deployment-guide/learn-about-deployment.md" >}}). diff --git a/content/nap-dos/releases/about-2.1.md b/content/nap-dos/releases/about-2.1.md new file mode 100644 index 000000000..7e4a5fbf7 --- /dev/null +++ b/content/nap-dos/releases/about-2.1.md @@ -0,0 +1,68 @@ +--- +title: NGINX App Protect DoS 2.1 +toc: true +weight: 200 +docs: DOCS-831 +--- + +Here you can find the release information for F5 NGINX App Protect DoS v2.1. NGINX App Protect DoS provides behavioral protection against Denial of Service (DoS) for your web applications. + +## Release 2.1 + +December 29, 2021 + +### New Features + +- [Support for RHEL (7.4.x and above) Virtual Machine (VM) deployment]({{< relref "learn-about-deployment.md#rhel-74-installation" >}}) +- [Support for RHEL 8 and UBI 8]({{< relref "learn-about-deployment.md#rhel-8-installation" >}}) +- [GRPC and HTTP/2 protection support for Centos (7.4.x and above) and RHEL (7.4.x and above)]({{< relref "/nap-dos/deployment-guide/learn-about-deployment.md" >}}) + +### Supported Packages + +#### App Protect DoS + +##### CentOS 7.4+ / RHEL 7.4+ / UBI7 + +- app-protect-dos-25+2.1.8-1.el7.ngx.el7.ngx.x86_64.rpm + +##### RHEL 8 / UBI8 + +- app-protect-dos-25+2.1.8-1.el8.ngx.el8.ngx.x86_64.rpm + +##### Debian 10 + +- app-protect-dos_25+2.1.8-1~buster_amd64.deb + +##### Ubuntu 18.04 + +- app-protect-dos_25+2.1.8-1~bionic_amd64.deb + +##### Ubuntu 20.04 + +- app-protect-dos_25+2.1.8-1~focal_amd64.deb + +#### NGINX Plus + +- NGINX Plus R25 + +### Resolved Issues + +- The `app_protect_dos_name` directive is not inherited by the inner blocks, causing to have more VSs than expected. 
+ +- Signature should not be created if good and bad actor use the same type of traffic. + +- When there's a clear anomaly on the User-Agent header signal, the signature doesn't include it. + +- HTTP Method signal is named incorrectly in signatures. + +### Important Notes + +- `proxy_request_buffering` off is not supported. + +- gRPC and HTTP/2 protection require active monitoring of the protected service. The directive `app_protect_dos_monitor` is mandatory for these use cases, otherwise, the attack will not be detected. + +- [TLS fingerprint]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md#policy-directive-app_protect_dos_policy_file" >}}) feature is not used in CentOS 7.4 and RHEL 7 / UBI 7 due to the old OpenSSL version. The required OpenSSL version is 1.1.1 or higher. + +- Slow POST attack always mitigates with block action while other types of attacks can also be mitigated with redirection or JS challenges. + +- The recommended option of running NGINX Plus in a Docker Container is with the `daemon off` flag. It's mandatory for UBI 8. diff --git a/content/nap-dos/releases/about-2.2.md b/content/nap-dos/releases/about-2.2.md new file mode 100644 index 000000000..b62f2f96f --- /dev/null +++ b/content/nap-dos/releases/about-2.2.md @@ -0,0 +1,80 @@ +--- +title: NGINX App Protect DoS 2.2 +toc: true +weight: 180 +docs: DOCS-839 +--- + +Here you can find the release information for F5 NGINX App Protect DoS v2.2. NGINX App Protect DoS provides behavioral protection against Denial of Service (DoS) for your web applications. + +## Release 2.2 + +February 15, 2022 + +In this release, support for NGINX App Protect DoS is added to NGINX Plus R26. 
+ +### New Features + +- **Improve Signature Lifecycle** + +### Supported Packages + +#### App Protect DoS + +##### CentOS 7.4+ / RHEL 7.4+ / UBI7 + +- app-protect-dos-26+2.2.20-1.el7.ngx.el7.ngx.x86_64.rpm + +##### RHEL 8 / UBI8 + +- app-protect-dos-26+2.2.20-1.el8.ngx.el8.ngx.x86_64.rpm + +##### Debian 10 + +- app-protect-dos_26+2.2.20-1~buster_amd64.deb + +##### Ubuntu 18.04 + +- app-protect-dos_26+2.2.20-1~bionic_amd64.deb + +##### Ubuntu 20.04 + +- app-protect-dos_26+2.2.20-1~focal_amd64.deb + +#### NGINX Plus + +- NGINX Plus R26 + +### Resolved Issues + +- Monitor requests are sent in new connections. It improves monitoring health capability and allows better detection of slow POST attacks. + +- Adaptive memory allocation by adminstall in order to support maximum cores. + The amount of allocated memory for NGINX App Protect DoS is controlled by an argument of adminstall. + The default value is 80 MB. For certain deployments, this amount of memory size is not enough. The default memory size is adaptive now. If the Virtual Machine (VM) has a big number of CPU cores, then we can also increase the amount of memory. For the case of up to 4 CPU cores, the allocated memory is 80MB, for more than 4 CPU cores, the allocated memory will be calculated as 80MB + CPUs * 2.5MB + + +### Important Notes + +- Misconfiguration of `app_protect_dos_monitor` can cause a false attack declaration. Port configuration should correspond to the port the server listens to. + + For example: + + ```shell + server { + listen 8080; + server_name myservice.com; + location / { + app_protect_dos_monitor "myservice.com:8080/"; + } + } + ``` +- `proxy_request_buffering` off is not supported. + +- gRPC and HTTP/2 protection require active monitoring of the protected service. The directive `app_protect_dos_monitor` is mandatory for these use cases, otherwise, the attack will not be detected. 
+ +- [TLS fingerprint]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md#policy-directive-app_protect_dos_policy_file" >}}) feature is not used in CentOS 7.4 and RHEL 7 / UBI 7 due to the old OpenSSL version. The required OpenSSL version is 1.1.1 or higher. + +- Slow POST attack always mitigates with block action while other types of attacks can also be mitigated with redirection or JS challenges. + +- The recommended option of running NGINX Plus in a Docker Container is with the `daemon off` flag. It's mandatory for UBI 8. diff --git a/content/nap-dos/releases/about-2.3.md b/content/nap-dos/releases/about-2.3.md new file mode 100644 index 000000000..da857532e --- /dev/null +++ b/content/nap-dos/releases/about-2.3.md @@ -0,0 +1,90 @@ +--- +title: NGINX App Protect DoS 2.3 +toc: true +weight: 170 +docs: DOCS-856 +--- + +Here you can find the release information for F5 NGINX App Protect DoS v2.3. NGINX App Protect DoS provides behavioral protection against Denial of Service (DoS) for your web applications. 
+ +## Release 2.3 + +May 9, 2022 + +### New Features + +- [Support for Alpine 3.15 deployment]({{< relref "learn-about-deployment.md#alpine-315-installation" >}}) +- [DoS Live Activity Monitoring]({{< relref "/nap-dos/monitoring/live-activity-monitoring.md" >}}) +- [New Arbitrator FQDN/IP directive]({{< relref "learn-about-directives-and-policy.md#arbitrator-fqdn-directive-app_protect_dos_arb_fqdn" >}}) + +### Supported Packages + +#### App Protect DoS + +##### CentOS 7.4+ / RHEL 7.4+ / UBI7 + +- app-protect-dos-26+2.3.46-1.el7.ngx.el7.ngx.x86_64.rpm + +##### RHEL 8 / UBI8 + +- app-protect-dos-26+2.3.46-1.el8.ngx.el8.ngx.x86_64.rpm + +##### Debian 10 + +- app-protect-dos_26+2.3.46-1~buster_amd64.deb + +##### Ubuntu 18.04 + +- app-protect-dos_26+2.3.46-1~bionic_amd64.deb + +##### Ubuntu 20.04 + +- app-protect-dos_26+2.3.46-1~focal_amd64.deb + +##### Alpine 3.15 + +- app-protect-dos-26.2.3.48-r1.apk + +#### NGINX Plus + +- NGINX Plus R26 + +### Resolved Issues + +- Improved Security logger configuration parsing: Detects invalid JSON structure, generates an appropriate message if field/value is not valid, doesn't stop on the first failure. + +### Important Notes + +- Misconfiguration of `app_protect_dos_monitor` potentially can cause a false attack declaration. +Port configuration should correspond to the port the server listens to. + + For example: + + ```shell + server { + listen 8080; + server_name myservice.com; + location / { + app_protect_dos_monitor "myservice.com:8080/"; + } + } + ``` + +Please note that the above syntax for the Monitor directive is old but still supported. + +The new syntax for the Monitor directive is as stated below, where **protocol** and **timeout** arguments are optional and default. Refer to [Learn about Directives and Policy](/nginx-app-protect-dos/directives-and-policy/learn-about-directives-and-policy/#monitor-directive-app_protect_dos_monitor) for more details on Monitor directive new syntax. 
+ +For example: + +`app_protect_dos_monitor uri=myservice.com:8080/ protocol=http1 timeout=5;` +

      + +- `proxy_request_buffering` off is not supported. + +- gRPC and HTTP/2 protection require active monitoring of the protected service. The directive `app_protect_dos_monitor` is mandatory for these use cases, otherwise, the attack will not be detected. + +- [TLS fingerprint]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md#policy-directive-app_protect_dos_policy_file" >}}) feature is not used in CentOS 7.4 and RHEL 7/UBI 7 due to the old OpenSSL version. The required OpenSSL version is 1.1.1 or higher. + +- Slow POST attack always mitigates with block action while other types of attacks can also be mitigated with redirection or JS challenges. + +- The recommended option of running NGINX Plus in a Docker Container is with the `daemon off` flag. It's mandatory for UBI 8. diff --git a/content/nap-dos/releases/about-2.4.md b/content/nap-dos/releases/about-2.4.md new file mode 100644 index 000000000..c59ea1996 --- /dev/null +++ b/content/nap-dos/releases/about-2.4.md @@ -0,0 +1,92 @@ +--- +title: NGINX App Protect DoS 2.4 +toc: true +weight: 160 +docs: DOCS-890 +--- + +Here you can find the release information for F5 NGINX App Protect DoS v2.4. NGINX App Protect DoS provides behavioral protection against Denial of Service (DoS) for your web applications. + +## Release 2.4 + +June 28, 2022 + +### New Features + +- [Support for Debian 11]({{< relref "learn-about-deployment.md#debian-10--debian-11-installation" >}}) +- Accelerated bad actors detection when the signatures mitigation is disabled. 
+ +### Supported Packages + +#### App Protect DoS + +##### CentOS 7.4+ / RHEL 7.4+ / UBI7 + +- app-protect-dos-27+2.4.1-1.el7.ngx.el7.ngx.x86_64.rpm + +##### RHEL 8 / UBI8 + +- app-protect-dos-27+2.4.1-1.el8.ngx.el8.ngx.x86_64.rpm + +##### Debian 10 + +- app-protect-dos_27+2.4.1-1~buster_amd64.deb + +##### Debian 11 + +- app-protect-dos_27+2.4.1-1~bullseye_amd64.deb + +##### Ubuntu 18.04 + +- app-protect-dos_27+2.4.1-1~bionic_amd64.deb + +##### Ubuntu 20.04 + +- app-protect-dos_27+2.4.1-1~focal_amd64.deb + +##### Alpine 3.15 + +- app-protect-dos-27.2.4.1-r1.apk + +#### NGINX Plus + +- NGINX Plus R27 + +### Resolved Issues + +- Honoring allow/deny directives for restricting access to the NGINX App Protect DoS Live Activity Monitoring API location. + +### Important Notes + +- If NGINX App Protect WAF is installed, app protect should be disabled for the location of DoS Live Activity Monitoring API + + For example: + + ```shell + location /api { + app_protect_enable off; + app_protect_dos_api; + } + ``` + +- Misconfiguration of `app_protect_dos_monitor` potentially can cause a false attack declaration. +Port configuration should correspond to the port the server listens to. + + For example: + + ```shell + server { + listen 8080; + location / { app_protect_dos_monitor "myservice.com:8080"; } + } + ``` + +- `proxy_request_buffering` off is not supported. + +- gRPC and HTTP/2 protection require active monitoring of the protected service. The directive `app_protect_dos_monitor` is mandatory for these use cases, otherwise, the attack will not be detected. + +- [TLS fingerprint]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md#policy-directive-app_protect_dos_policy_file" >}}) feature is not used in CentOS 7.4 and RHEL 7 / UBI 7 due to the old OpenSSL version. The required OpenSSL version is 1.1.1 or higher. 
+ +- Slow POST attack always mitigates with block action while other types of attacks can also be mitigated with redirection or JS challenges. + +- The recommended option of running NGINX Plus in a Docker Container is with the `daemon off` flag. It's mandatory for UBI 8. diff --git a/content/nap-dos/releases/about-3.0.md b/content/nap-dos/releases/about-3.0.md new file mode 100644 index 000000000..8618a5fc3 --- /dev/null +++ b/content/nap-dos/releases/about-3.0.md @@ -0,0 +1,94 @@ +--- +title: NGINX App Protect DoS 3.0 +toc: true +weight: 150 +docs: DOCS-946 +--- + +Here you can find the release information for F5 NGINX App Protect DoS v3.0. NGINX App Protect DoS provides behavioral protection against Denial of Service (DoS) for your web applications. + +## Release 3.0 + +September 21, 2022 + +### New Features + +- L4 mitigation (with eBPF) +- DoS Live Activity Monitoring with requests mitigation graphs +- DoS Live Activity Monitoring support for multi-instances NGINX App Protect DoS setups (multi-VMs, multi-replicas) + +### Supported Packages + +#### App Protect DoS + +##### CentOS 7.4+ / RHEL 7.4+ / UBI7 + +- app-protect-dos-27+3.0.3-1.el7.ngx.el7.ngx.x86_64.rpm + +##### RHEL 8 / UBI8 + +- app-protect-dos-27+3.0.3-1.el8.ngx.el8.ngx.x86_64.rpm + +##### Debian 10 + +- app-protect-dos_27+3.0.3-1~buster_amd64.deb + +##### Debian 11 + +- app-protect-dos_27+3.0.3-1~bullseye_amd64.deb + +##### Ubuntu 18.04 + +- app-protect-dos_27+3.0.3-1~bionic_amd64.deb + +##### Ubuntu 20.04 + +- app-protect-dos_27+3.0.3-1~focal_amd64.deb + +##### Alpine 3.15 + +- app-protect-dos-27.3.0.3-r1.apk + +#### NGINX Plus + +- NGINX Plus R27 + + +### Important Notes + +- L4 (eBPF) mitigation helps mitigate volumetric attacks by slowing down the opening of TCP connections by the attackers. +It is recommended to deploy NGINX App Protect DoS with L4 (eBPF) mitigation at the perimeter network or behind L3 load balancer. 
+Installing NGINX App Protect DoS with L4 (eBPF) mitigation behind L4/L7 load balancer may result in the load balancer's starvation during an attack. + +- If NGINX App Protect WAF is installed, app protect should be disabled for the location of DoS Live Activity Monitoring API. + + For example: + + ```shell + location /api { + app_protect_enable off; + app_protect_dos_api; + } + ``` + +- Misconfiguration of `app_protect_dos_monitor` potentially can cause a false attack declaration. +Port configuration should correspond to the port the server listens to. + + For example: + + ```shell + server { + listen 8080; + location / { app_protect_dos_monitor "myservice.com:8080"; } + } + ``` + +- `proxy_request_buffering` off is not supported. + +- gRPC and HTTP/2 protection require active monitoring of the protected service. The directive `app_protect_dos_monitor` is mandatory for these use cases, otherwise, the attack will not be detected. + +- [TLS fingerprint]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md#policy-directive-app_protect_dos_policy_file" >}}) feature is not used in CentOS 7.4 and RHEL 7 / UBI 7 due to the old OpenSSL version. The required OpenSSL version is 1.1.1 or higher. + +- Slow POST attack always mitigates with block action while other types of attacks can also be mitigated with redirection or JS challenges. + +- The recommended option of running NGINX Plus in a Docker Container is with the `daemon off` flag. It's mandatory for UBI 8. diff --git a/content/nap-dos/releases/about-3.1.md b/content/nap-dos/releases/about-3.1.md new file mode 100644 index 000000000..7ff80d589 --- /dev/null +++ b/content/nap-dos/releases/about-3.1.md @@ -0,0 +1,98 @@ +--- +title: NGINX App Protect DoS 3.1 +toc: true +weight: 140 +docs: DOCS-995 +--- + +Here you can find the release information for F5 NGINX App Protect DoS v3.1. NGINX App Protect DoS provides behavioral protection against Denial of Service (DoS) for your web applications. 
+ +## Release 3.1 + +November 29, 2022 + +In this release, NGINX App Protect DoS supports NGINX Plus R28. + +### New Features + +- Support for NGINX Plus R28. +- NGINX App protect DoS can be deployed behind L4/L7 load balancers when L4 protection is enabled. +- Fixed the issue: Installing NGINX App protect DoS with L4 (eBPF) mitigation behind L4/L7 load balancer may result in the load balancer's starvation during an attack. +- Support for [proxy_protocol]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md#monitor-directive-app_protect_dos_monitor" >}}) configuration for server health monitoring.
      +Previously, server health monitoring could not be used when the listen directive of the corresponding server block contained the `proxy_protocol` parameter.&#13;
      +This disallowed using NGINX App protect DoS for Denial of Service (DoS) protection for HTTP2 and gRPC protected objects in the `proxy_protocol` configuration. + + +### Supported Packages + +#### App Protect DoS + +##### CentOS 7.4+ / RHEL 7.4+ / UBI7 + +- app-protect-dos-28+3.1.7-1.el7.ngx.x86_64.rpm + +##### RHEL 8 / UBI8 + +- app-protect-dos-28+3.1.7-1.el8.ngx.x86_64.rpm + +##### Debian 11 + +- app-protect-dos_28+3.1.7-1~bullseye_amd64.deb + +##### Ubuntu 18.04 + +- app-protect-dos_28+3.1.7-1~bionic_amd64.deb + +##### Ubuntu 20.04 + +- app-protect-dos_28+3.1.7-1~focal_amd64.deb + +##### Alpine 3.15 + +- app-protect-dos-28.3.1.7-r1.apk + +#### NGINX Plus + +- NGINX Plus R28 + + +### Important Notes + +- Installing L4 accelerated mitigation feature (install `app-protect-dos-ebpf`) configures `nginx` and `admd` to run with root privileges. + +- Support for `proxy_protocol` configuration: `proxy_protocol` monitor parameter should be used when the listen directive of the correspondent server block contains the `proxy_protocol` parameter. + +- If NGINX App Protect WAF is installed, app protect should be disabled for the location of DoS Live Activity Monitoring API. + + For example: + + ```shell + location /api { + app_protect_enable off; + app_protect_dos_api; + } + ``` + +- Misconfiguration of `app_protect_dos_monitor` potentially can cause a false attack declaration. +Port configuration should correspond to the port the server listens to. + + For example: + + ```shell + server { + listen 8080; + location / { app_protect_dos_monitor "myservice.com:8080"; } + } + ``` + +- `proxy_request_buffering` off is not supported. + +- gRPC and HTTP/2 protection require active monitoring of the protected service. The directive `app_protect_dos_monitor` is mandatory for these use cases, otherwise, the attack will not be detected. 
 + +- [TLS fingerprint]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md#policy-directive-app_protect_dos_policy_file" >}}) feature is not used in CentOS 7.4 and RHEL 7 / UBI 7 due to the old OpenSSL version. The required OpenSSL version is 1.1.1 or higher. + +- Monitor directive (`app_protect_dos_monitor`) with the `proxy_protocol` parameter cannot be configured on Ubuntu 18.04. As a result, gRPC and HTTP/2 DoS protection for `proxy_protocol` configuration is not supported. + +- Slow POST attack always mitigates with block action while other types of attacks can also be mitigated with redirection or JS challenges. + +- The recommended option of running NGINX Plus in a Docker Container is with the `daemon off` flag. It's mandatory for UBI 8. diff --git a/content/nap-dos/releases/about-4.0.md b/content/nap-dos/releases/about-4.0.md new file mode 100644 index 000000000..3b78940c7 --- /dev/null +++ b/content/nap-dos/releases/about-4.0.md @@ -0,0 +1,91 @@ +--- +title: NGINX App Protect DoS 4.0 +toc: true +weight: 120 +docs: DOCS-1115 +--- + +Here you can find the release information for F5 NGINX App Protect DoS v4.0. NGINX App Protect DoS provides behavioral protection against Denial of Service (DoS) for your web applications. + +## Release 4.0 + +January 31, 2023 + +### New Features + +- Distributed Denial of Service (DDoS) protection feature for WebSocket services.&#13;
      Refer to the [Configuration Example]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md#monitor-directive-app_protect_dos_monitor" >}}) for WebSocket services.&#13;
      +- DDoS protection against slow attacks has been improved using machine learning algorithm on all types of traffic. +- `app_protect_dos_monitor` directive, which monitors the proxied server, supports a new type of protocol - "WebSocket". + + +### Supported Packages + +#### App Protect DoS + +##### CentOS 7.4+ / RHEL 7.4+ / UBI7 + +- app-protect-dos-28+4.0.1-1.el7.ngx.x86_64.rpm + +##### RHEL 8 / UBI8 + +- app-protect-dos-28+4.0.1-1.el8.ngx.x86_64.rpm + +##### Debian 11 + +- app-protect-dos_28+4.0.1.-1~bullseye_amd64.deb + +##### Ubuntu 18.04 + +- app-protect-dos_28+4.0.1-1~bionic_amd64.deb + +##### Ubuntu 20.04 + +- app-protect-dos_28+4.0.1-1~focal_amd64.deb + +##### Alpine 3.15 + +- app-protect-dos-28.4.0.1-r1.apk + +#### NGINX Plus + +- NGINX Plus R28 + +### Important Notes + +- WebSocket protection requires active monitoring of the protected service. The directive `app_protect_dos_monitor` is mandatory for these use cases, otherwise, the attack will not be detected. + +- Installing accelerated mitigation feature (install `app-protect-dos-ebpf`) configures `nginx` and `admd` to run with root privileges. + +- Support for `proxy_protocol` configuration: `proxy_protocol` monitor parameter should be used when the `listen` directive of the correspondent server block contains the `proxy_protocol` parameter. + +- If NGINX App Protect WAF is installed, app protect should be disabled for the location of DoS Live Activity Monitoring API. + + For example: + + ```shell + location /api { + app_protect_enable off; + app_protect_dos_api; + } + ``` + +- Port configuration in `app_protect_dos_monitor` should correspond to the port, the server listens to. Misconfiguration can potentially cause a false attack declaration. + + For example: + + ```shell + server { + listen 8080; + location / { app_protect_dos_monitor "myservice.com:8080"; } + } + ``` + +- gRPC and HTTP/2 protection require active monitoring of the protected service. 
The directive `app_protect_dos_monitor` is mandatory for the attack to be detected. + +- [TLS fingerprint]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md#policy-directive-app_protect_dos_policy_file" >}}) feature is not used in CentOS 7.4 and RHEL 7 / UBI 7 due to the old OpenSSL version. The required OpenSSL version is 1.1.1 or higher. + +- Monitor directive `app_protect_dos_monitor` with `proxy_protocol` parameter can not be configured on Ubuntu 18.04. As a result, gRPC and HTTP/2 DoS protection for `proxy_protocol` configuration is not supported. + +- Slow attack always mitigates with block action while other types of attacks can also be mitigated with redirection or JS challenges. + +- The recommended option of running NGINX Plus in a Docker Container is with the `daemon off` flag. It's mandatory for UBI 8. diff --git a/content/nap-dos/releases/about-4.1.md b/content/nap-dos/releases/about-4.1.md new file mode 100644 index 000000000..50c369345 --- /dev/null +++ b/content/nap-dos/releases/about-4.1.md @@ -0,0 +1,94 @@ +--- +title: NGINX App Protect DoS 4.1 +toc: true +weight: 110 +docs: DOCS-1203 +--- + +Here you can find the release information for F5 NGINX App Protect DoS v4.1. NGINX App Protect DoS provides behavioral protection against Denial of Service (DoS) for your web applications. + +## Release 4.1 + +May 2, 2023 + +In this release, NGINX App Protect DoS supports NGINX Plus R29. 
+ +### New Features + +- Support for NGINX Plus R29 +- Support for Rocky Linux 8 + +### Supported Packages + +#### App Protect DoS + +##### CentOS 7.4+ / RHEL 7.4+ / UBI7 + +- app-protect-dos-29+4.1.2-1.el7.ngx.x86_64.rpm + +##### RHEL 8 and Rocky Linux 8 + +- app-protect-dos-29+4.1.2-1.el8.ngx.x86_64.rpm + +##### Debian 11 + +- app-protect-dos_29+4.1.2-1~bullseye_amd64.deb + +##### Ubuntu 18.04 + +- app-protect-dos_29+4.1.2-1~bionic_amd64.deb + +##### Ubuntu 20.04 + +- app-protect-dos_29+4.1.2-1~focal_amd64.deb + +##### Alpine 3.15 + +- app-protect-dos-29.4.1.2-r1.apk + +#### NGINX Plus + +- NGINX Plus R29 + + +### Important Notes + +- Installing L4 accelerated mitigation feature (install `app-protect-dos-ebpf`) configures `nginx` and `admd` to run with root privileges. + +- Support for `proxy_protocol` configuration: `proxy_protocol` monitor parameter should be used when the `listen` directive of the correspondent server block contains the `proxy_protocol` parameter. + +- If NGINX App Protect WAF is installed, app protect should be disabled for the location of DoS Live Activity Monitoring API. + + For example: + + ```shell + location /api { + app_protect_enable off; + app_protect_dos_api; + } + ``` + +- Port configuration in `app_protect_dos_monitor` should correspond to the port, the server listens to. Misconfiguration can potentially cause a false attack declaration. + + For example: + + ```shell + server { + listen 8080; + location / { app_protect_dos_monitor "myservice.com:8080"; } + } + ``` + +- `proxy_request_buffering` off is not supported. + +- gRPC and HTTP/2 protection require active monitoring of the protected service. The directive `app_protect_dos_monitor` is mandatory for the attack to be detected. + +- [TLS fingerprint]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md#policy-directive-app_protect_dos_policy_file" >}}) feature is not used in CentOS 7.4 and RHEL 7 / UBI 7 due to the old OpenSSL version. 
The required OpenSSL version is 1.1.1 or higher. + +- Monitor directive `app_protect_dos_monitor` with `proxy_protocol` parameter can not be configured on Ubuntu 18.04. As a result, gRPC and HTTP/2 DoS protection for `proxy_protocol` configuration is not supported. + +- Slow POST attack always mitigates with block action while other types of attacks can also be mitigated with redirection or JS challenges. + +- The recommended option of running NGINX Plus in a Docker Container is with the `daemon off` flag. It's mandatory for UBI 8. + +- The package dependencies for NGINX App Protect DoS have changed in this release, replacing the `curl` dependencies with `libcurl` only. For more information, see the [NGINX App Protect DoS Deployment Guide]({{< relref "/nap-dos/deployment-guide/learn-about-deployment.md#prerequisites" >}}). diff --git a/content/nap-dos/releases/about-4.2.md b/content/nap-dos/releases/about-4.2.md new file mode 100644 index 000000000..b3fc4b951 --- /dev/null +++ b/content/nap-dos/releases/about-4.2.md @@ -0,0 +1,100 @@ +--- +title: NGINX App Protect DoS 4.2 +toc: true +weight: 100 +docs: DOCS-1254 +--- + +Here you can find the release information for F5 NGINX App Protect DoS v4.2. NGINX App Protect DoS provides behavioral protection against Denial of Service (DoS) for your web applications. + +## Release 4.2 + +August 15, 2023 + +In this release, NGINX App Protect DoS supports NGINX Plus R30. 
+ +### New Features + +- Support for Nginx Plus R30 +- [Support for Ubuntu 22.04]({{< relref "learn-about-deployment.md#debian--ubuntu-installation" >}}) +- Support for HTTP3/QUIC +- Improvement of Embedded Server Health mechanism + +### Supported Packages + +#### App Protect DoS + +##### Alpine 3.15 + +- app-protect-dos-30.4.2.0-r1.apk + +##### CentOS 7.4+ / RHEL 7.4+ / UBI7 + +- app-protect-dos-30.4.2.0-1.el7.ngx.x86_64.rpm + +##### RHEL 8 and Rocky Linux 8 + +- app-protect-dos-30.4.2.0-1.el8.ngx.x86_64.rpm + +##### Debian 11 + +- app-protect-dos_30.4.2.0-1~bullseye_amd64.deb + +##### Ubuntu 20.04 + +- app-protect-dos_30.4.2.0-1~focal_amd64.deb + +##### Ubuntu 22.04 + +- app-protect-dos_30.4.2.0-1~jammy_amd64.deb + + +#### NGINX Plus + +- NGINX Plus R30 + + +### Important Notes + +- Installing L4 accelerated mitigation feature (install `app-protect-dos-ebpf`) configures `nginx` and `admd` to run with root privileges. + +- Support for `proxy_protocol` configuration: `proxy_protocol` monitor parameter should be used when the `listen` directive of the correspondent server block contains the `proxy_protocol` parameter. + +- If NGINX App Protect WAF is installed, app protect should be disabled for the location of DoS Live Activity Monitoring API. + + For example: + + ```shell + location /api { + app_protect_enable off; + app_protect_dos_api; + } + ``` + +- Port configuration in `app_protect_dos_monitor` should correspond to the port, the server listens to. Misconfiguration can potentially cause a false attack declaration. + + For example: + + ```shell + server { + listen 8080; + server_name myservice.com; + location / { + app_protect_dos_monitor "myservice.com:8080/"; + } + } + ``` + +- `proxy_request_buffering` off is not supported. + +- gRPC and HTTP/2 protection require active monitoring of the protected service. The directive `app_protect_dos_monitor` is mandatory for the attack to be detected. 
+ +- [TLS fingerprint]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md#policy-directive-app_protect_dos_policy_file" >}}) feature is not used in CentOS 7.4 and RHEL 7 / UBI 7 due to the old OpenSSL version. The required OpenSSL version is 1.1.1 or higher. + +- Slow POST attack always mitigates with block action while other types of attacks can also be mitigated with redirection or JS challenges. + +- The recommended option of running NGINX Plus in a Docker Container is with the `daemon off` flag. It's mandatory for UBI 8. + +- The package dependencies for NGINX App Protect DoS have changed in this release, replacing the `curl` dependencies with `libcurl` only. For more information, see the [NGINX App Protect DoS Deployment Guide]({{< relref "/nap-dos/deployment-guide/learn-about-deployment.md#prerequisites" >}}). + +- Starting with this release, Ubuntu 18.04 support has been deprecated. diff --git a/content/nap-dos/releases/about-4.3.md b/content/nap-dos/releases/about-4.3.md new file mode 100644 index 000000000..3bd1598b3 --- /dev/null +++ b/content/nap-dos/releases/about-4.3.md @@ -0,0 +1,58 @@ +--- +title: NGINX App Protect DoS 4.3 +toc: true +weight: 90 +docs: DOCS-1361 +--- + +Here you can find the release information for F5 NGINX App Protect DoS v4.3. NGINX App Protect DoS provides behavioral protection against Denial of Service (DoS) for your web applications. + +## Release 4.3 + +December 19, 2023 + +In this release, NGINX App Protect DoS supports NGINX Plus R31. 
+ +### New Features + +- Support for Nginx Plus R31 +- [Support for Alpine 3.17]({{< relref "learn-about-deployment.md#alpine-315x--317x-installation" >}}) + + +### Supported Packages + +#### App Protect DoS + +##### Alpine 3.17 + +- app-protect-dos-31.4.3.1-r1.apk + +##### CentOS 7.4+ / RHEL 7.4+ / UBI7 + +- app-protect-dos-31.4.3.1-1.el7.ngx.x86_64.rpm + +##### RHEL 8 and Rocky Linux 8 + +- app-protect-dos-31.4.3.1-1.el8.ngx.x86_64.rpm + +##### Debian 11 + +- app-protect-dos_31.4.3.1-1~bullseye_amd64.deb + +##### Ubuntu 20.04 + +- app-protect-dos_31.4.3.1-1~focal_amd64.deb + +##### Ubuntu 22.04 + +- app-protect-dos_31.4.3.1-1~jammy_amd64.deb + + +#### NGINX Plus + +- NGINX Plus R31 + + +### Important Note + +- Starting with this release, Alpine 3.15 support has been deprecated. diff --git a/content/nap-dos/releases/about-4.4.md b/content/nap-dos/releases/about-4.4.md new file mode 100644 index 000000000..db3dc4a97 --- /dev/null +++ b/content/nap-dos/releases/about-4.4.md @@ -0,0 +1,63 @@ +--- +title: NGINX App Protect DoS 4.4 +toc: true +weight: 80 +docs: DOCS-1361 +--- + +Here you can find the release information for F5 NGINX App Protect DoS v4.4. NGINX App Protect DoS provides behavioral protection against Denial of Service (DoS) for your web applications. + +## Release 4.4 + +May 29, 2024 + +In this release, NGINX App Protect DoS supports NGINX Plus R32. + +### New Features + +- Support for NGINX Plus R32 +- Support for Debian 12 +- Support for RHEL 9 +- [Allowlisted IPs]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md#access-file-directive-app_protect_dos_access_file" >}}) +- Support for up to 1000 protected objects, ensuring high scalability and efficient resource utilization. 
+ +### Supported Packages + +#### App Protect DoS + +##### Alpine 3.17 + +- app-protect-dos-32+4.4.1-r1.apk + +##### CentOS 7.4+ / RHEL 7.4+ + +- app-protect-dos-32+4.4.1-1.el7.ngx.x86_64.rpm + +##### RHEL 8 and Rocky Linux 8 + +- app-protect-dos-32+4.4.1-1.el8.ngx.x86_64.rpm + +##### RHEL 9 + +- app-protect-dos-32+4.4.1-1.el9.ngx.x86_64.rpm + +##### Debian 11 + +- app-protect-dos_32+4.4.1-1~bullseye_amd64.deb + +##### Debian 12 + +- app-protect-dos_32+4.4.1-1~bookworm_amd64.deb + +##### Ubuntu 20.04 + +- app-protect-dos_32+4.4.1-1~focal_amd64.deb + +##### Ubuntu 22.04 + +- app-protect-dos_32+4.4.1-1~jammy_amd64.deb + + +#### NGINX Plus + +- NGINX Plus R32 diff --git a/content/nap-dos/releases/about-4.5.md b/content/nap-dos/releases/about-4.5.md new file mode 100644 index 000000000..ac889021e --- /dev/null +++ b/content/nap-dos/releases/about-4.5.md @@ -0,0 +1,45 @@ +--- +title: NGINX App Protect DoS 4.5 +toc: true +weight: 70 +docs: DOCS-000 +--- + +Here you can find the release information for F5 NGINX App Protect DoS v4.5 + +NGINX App Protect DoS provides behavioral protection against Denial of Service (DoS) for your web applications. + +--- + +## Release 4.5 + +Nov 19, 2024 + +NGINX App Protect DoS 4.5 adds support for NGINX Plus R33. + +--- + +### New features + +- Support for NGINX Plus R33 +- Add support for Alpine 3.19 +- Add support for Ubuntu 24.04 +- Remove support for CentOS 7 / RHEL 7 +- *eBPF Manager - Privileged Process for Secure Command Handling* +This feature introduces the eBPF Manager, a process that securely handles eBPF commands on behalf of other processes. By allowing certain processes to operate without elevated privileges, it enhances system security. As part of this enhancement, NGINX now runs under a non-root user account, which is required for deploying specific security solutions. 
+ +--- + +### Supported packages + +| Distribution name | Package file | +|--------------------------|--------------------------------------------------| +| Alpine 3.17 / 3.19 | _app-protect-dos-33+4.5.2-r1.apk_ | +| RHEL 8 and Rocky Linux 8 | _app-protect-dos-33+4.5.2-1.el8.ngx.x86_64.rpm_ | +| RHEL 9 | _app-protect-dos-33+4.5.2-1.el9.ngx.x86_64.rpm_ | +| Debian 11 | _app-protect-dos_33+4.5.2-1\~bullseye_amd64.deb_ | +| Debian 12 | _app-protect-dos_33+4.5.2-1\~bookworm_amd64.deb_ | +| Ubuntu 20.04 | _app-protect-dos_33+4.5.2-1\~focal_amd64.deb_ | +| Ubuntu 22.04 | _app-protect-dos_33+4.5.2-1\~jammy_amd64.deb_ | +| Ubuntu 24.04 | _app-protect-dos_33+4.5.2-1\~noble_amd64.deb_ | +| NGINX Plus | _NGINX Plus R33_ | diff --git a/content/nap-dos/troubleshooting-guide/_index.md b/content/nap-dos/troubleshooting-guide/_index.md new file mode 100644 index 000000000..6ef2f6d32 --- /dev/null +++ b/content/nap-dos/troubleshooting-guide/_index.md @@ -0,0 +1,9 @@ +--- +description: Learn how to troubleshoot your F5 NGINX App Protect DoS deployment. +menu: + docs: + parent: NGINX App Protect DoS Documentation. +title: Troubleshooting Guide +weight: 200 +url: /nginx-app-protect-dos/troubleshooting-guide/ +--- diff --git a/content/nap-dos/troubleshooting-guide/how-to-troubleshoot.md b/content/nap-dos/troubleshooting-guide/how-to-troubleshoot.md new file mode 100644 index 000000000..2e84a793a --- /dev/null +++ b/content/nap-dos/troubleshooting-guide/how-to-troubleshoot.md @@ -0,0 +1,156 @@ +--- +description: Learn about the F5 NGINX App Protect DoS Troubleshooting Guide. +docs: DOCS-675 +doctypes: +- task +title: NGINX App Protect DoS Troubleshooting Guide +toc: true +weight: 200 +--- + +## Overview + +This Troubleshooting Guide is intended to provide guidance to customers in the detection and correction of programming issues in F5 NGINX App Protect DoS. It may also be useful to IT. 
+ +## Resolving Known Problems + +### Configuration + +{{}} + +|Problem|Solution| +|-------|--------| +| NGINX is not running (ps -aux)

      Reloading NGINX fails| Check the error log at `/var/log/nginx/error.log`.
      Fix the problem and re-run NGINX.| +| No original source IP in logs|1. XFF is not configured (or not configured correctly)
      2. External Load Balancer doesn't forward XFF | +| NGINX App Protect DoS functionality is not as expected| NGINX App Protect DoS has several logs which can be used for troubleshooting.
      Usually, it is best to look for any warning or error messages within the logs.
      Refer to [Logs Overview]({{< relref "/nap-dos/monitoring/types-of-logs.md">}})| +| `Too many open files` error message | Increase number of file descriptors.
      For example: `worker_rlimit_nofile 65535;` in the main context of `nginx.conf` file.
      Refer to [worker_rlimit_nofile directive](https://nginx.org/en/docs/ngx_core_module.html#worker_rlimit_nofile) | +| `setrlimit ... failed (Permission denied)` error message | Increase the limit using the following command as the root user:&#13;
      `setsebool -P httpd_setrlimit 1;`
      Refer to [Issue 4: Too many files are open Error](https://www.nginx.com/blog/using-nginx-plus-with-selinux/#Issue-4:-%3Ccode%3EToo-many-files-are-open%3C/code%3E-Error) | +| More protected objects than expected | The `app_protect_dos_enable` directive is inherited by all server and location blocks beneath it, each block will be a protected object.
      Consider moving this directive from outer to inner block.
      Refer to: [NGINX App Protect DoS - Directives and Policy]({{< relref "/nap-dos/directives-and-policy/learn-about-directives-and-policy.md" >}}) | +| `No DOS protection for ngx_worker at idx X` warning message | There are more nginx processes than allowed.
      Either decrease the number of nginx processes (`worker_processes` directive in `nginx.conf` file) or increase the number of supported workers for NGINX App Protect DoS using the flag `--max-workers NUM` for `/usr/bin/adminstall`. | +| `unknown directive 'app_protect_dos_xxx'` error message | App Protect DOS module is not loaded. Add this line to the main (global) context of nginx.conf:&#13;
      `load_module "/etc/nginx/modules/ngx_http_app_protect_dos_module.so";` | +| NGINX struggles handling a high rate of incoming connections | Linux machine should be tuned for optimal performance.
      Refer to [Tuning NGINX for Performance](https://www.nginx.com/blog/tuning-nginx/) | +| Error in `adminstall` process, such as `Failed to allocate` | Insufficient memory to allocate all the required resources.
      Increase the `--memory` size or decrease the number of nginx workers (`--max-workers`) if not all of them are going to be in use.&#13;
      Use the `--help` flag for more info. | + +{{
      }} + +### ELK issues + +ELK issues are addressed directly in GitHub by posting the issue to Kibana dashboards for [NGINX App Protect DoS GitHub repo](https://github.com/f5devcentral/nap-dos-elk-dashboards). + +### SELinux + +Configure SELinux to allow NGINX App Protect DoS. + +The configuration steps are found in the [SELinux configuration]({{< relref "/nap-dos/deployment-guide/learn-about-deployment.md#selinux-configuration" >}}) section of the deployment guide. + +If SELinux still denies access to something, it means that one or more security exceptions should be enabled. + +The following steps describe how to find the problematic exception and enable it. + +1. Temporarily add the `httpd_t` domain to the permissive list (this will completely enable all the fields).&#13;
      In this configuration SELinux will not deny anything related to NGINX as NGINX is labeled with the `httpd_t` context.
      + +```shell +semanage permissive -a httpd_t +``` + +2. Repeat the scenario which made SELinux deny and see that it now works. +3. In permissive mode, security exceptions are logged to the default Linux audit log. +Due to the previous step, the permitted exception will be logged.
      +The log can be found in `/var/log/audit/audit.log`. +4. The following command will parse the audit log and build a SELinux command that will permit all the exceptions found in the log: + +```shell +grep nginx /var/log/audit/audit.log | audit2allow -m nginx +``` + +5. Compare the generated output to the nginx.te file mentioned in the deployment guide. +Add all the missing commands to the nginx.te file and repeat the SELinux configuration mentioned in the deployment guide. +6. Delete the `httpd_t` domain from the permissive list: + +```shell +semanage permissive -d httpd_t +``` + +For more information about how to use NGINX Plus with SELinux - check our [blog](https://www.nginx.com/blog/using-nginx-plus-with-selinux/) + +### Send Logs to Support + +If there are any problems, collect the troubleshooting information in a tarball and send it to your customer support engineer. + +1. Get package version: + + a. Get NGINX App Protect DoS version:
      + + ```shell + /usr/bin/admd -v > package_versions.txt + ``` + + b. Get packages version:
      For CentOS/RHEL:
      + + ```shell + rpm -qa nginx-plus* app-protect* >> package_versions.txt + ``` + + For Debian/Ubuntu:
      + + ```shell + apt list --installed | grep -E 'nginx-plus|app-protect' >> package_versions.txt + ``` + + c. Get OS version:
      + + ```shell + cat /etc/os-release > system_version.txt && uname -r >> system_version.txt && cat /proc/version >> system_version.txt + ``` + + d. Get NGINX App Protect DoS shared memory dump:
      + + ```shell + admd -c > napd_shmem.txt + ``` + + e. Get Linux shared memory dump:
      + + ```shell + ipcs -m > linux_shmem.txt + ``` + +2. Create a list of files for tarball:
      + a. Create a file using your favorite editor (i.e VI editor)
      + + ```shell + vi logs.txt + ``` + + b. Insert the following content into the file created above:
      + + ```shell + package_versions.txt + system_version.txt + napd_shmem.txt + linux_shmem.txt + /var/log/adm/* + /var/run/adm/* + /var/log/nginx/* + ``` + + c. Add the path of your NGINX configuration files including all references, for example:
      + + ```shell + /etc/nginx/nginx.conf + /etc/nginx/conf.d/* + ``` + + d. Add all policies and log file configuration, for example:
      + + ```shell + /etc/app_protect_dos/* + ``` + +3. Create the tarball: + + ```shell + tar cvfz logs.tgz `cat logs.txt` + ``` + +4. Send `logs.tgz` to your customer support. diff --git a/content/nap-waf/_index.md b/content/nap-waf/_index.md new file mode 100644 index 000000000..bddd99112 --- /dev/null +++ b/content/nap-waf/_index.md @@ -0,0 +1,13 @@ +--- +description: | + Modern app security solution that works seamlessly in DevOps environments. + + Request your [free 30-day trial](https://www.nginx.com/free-trial-request/) today. +linkTitle: F5 NGINX App Protect WAF +menu: docs +title: F5 NGINX App Protect WAF +weight: 100 +url: /nginx-app-protect-waf/ +cascade: + logo: "NGINX-App-Protect-WAF-product-icon.svg" +--- diff --git a/content/nap-waf/v4/_index.md b/content/nap-waf/v4/_index.md new file mode 100644 index 000000000..4e6c5d6b3 --- /dev/null +++ b/content/nap-waf/v4/_index.md @@ -0,0 +1,9 @@ +--- +description: F5 NGINX App Protect WAF v4 documentation. +menu: + docs: + parent: F5 NGINX App Protect WAF Documentation. +title: Version 4 and Earlier +weight: 100 +url: /nginx-app-protect-waf/v4/ +--- diff --git a/content/nap-waf/v4/admin-guide/_index.md b/content/nap-waf/v4/admin-guide/_index.md new file mode 100644 index 000000000..ee20b96c0 --- /dev/null +++ b/content/nap-waf/v4/admin-guide/_index.md @@ -0,0 +1,10 @@ +--- +description: Learn how to deploy, upgrade, and manage F5 NGINX App Protect WAF and App + Protect Signatures. Versions 4 and earlier. 
+menu: + docs: + parent: v4 and earlier +title: Administration Guides +weight: 100 +url: /nginx-app-protect-waf/v4/admin-guide/ +--- diff --git a/content/nap-waf/v4/admin-guide/install-nms.md b/content/nap-waf/v4/admin-guide/install-nms.md new file mode 100644 index 000000000..a91b0a06a --- /dev/null +++ b/content/nap-waf/v4/admin-guide/install-nms.md @@ -0,0 +1,32 @@ +--- +description: Learn how to get more from F5 NGINX App Protect WAF with the NGINX Management + Suite Security Monitoring and Instance Manager modules. +docs: DOCS-1126 +doctypes: +- task +title: Using NGINX App Protect WAF with NGINX Management Suite +toc: true +weight: 100 +--- + +## Overview + +[F5 NGINX Management Suite Instance Manager]({{< relref "/nms/about.md#instance-manager" >}}) provides a centralized interface where you can create, modify, and publish policies, attack signatures, and threat campaigns for NGINX App Protect WAF. You can use Instance Manager to deploy configuration updates to one, some, or all your NGINX App Protect WAF data plane instances simultaneously. + +- Refer to [Manage Your App Protect WAF Configs]({{< relref "/nim/nginx-app-protect/setup-waf-config-management.md" >}}) for installation instructions. + +--- + +## Using Security Monitoring with NGINX Management Suite + +[NGINX Management Suite Security Monitoring]({{< relref "/nms/about.md#security-monitoring" >}}) provides a centralized visualization tool that lets you analyze threats, view protection insights, and identify areas for policy tuning. + +- For more information on how to configure Security Monitoring, see [Set Up App Protect Instances for Security Monitoring]({{< relref "/nim/monitoring/security-monitoring/configure/set-up-app-protect-instances.md" >}}). 
+ +--- + +## What's Next + +Check out the NGINX Solution Blog to learn more about the benefits of using Instance Manager and Security Monitoring with NGINX App Protect WAF: + +- [Why Managing WAFs at Scale Requires Centralized Visibility and Configuration Management](https://www.nginx.com/blog/why-managing-wafs-at-scale-requires-centralized-visibility-and-configuration-management/). diff --git a/content/nap-waf/v4/admin-guide/install.md b/content/nap-waf/v4/admin-guide/install.md new file mode 100644 index 000000000..a3385a156 --- /dev/null +++ b/content/nap-waf/v4/admin-guide/install.md @@ -0,0 +1,3363 @@ +--- +description: This guide explains how to deploy F5 NGINX App Protect WAF v4 as well as + upgrade App Protect and the App Protect signature sets. +docs: DOCS-646 +doctypes: +- task +title: NGINX App Protect WAF Administration Guide +toc: true +weight: 100 +--- + +## Overview + +F5 NGINX App Protect WAF provides web application firewall (WAF) security protection for your web applications, including OWASP Top 10; response inspection; Meta characters check; HTTP protocol compliance; evasion techniques; disallowed file types; JSON & XML well-formedness; sensitive parameters & Data Guard. Refer to [Supported Security Policy Features]({{< relref "/nap-waf/v4/configuration-guide/configuration.md#supported-security-policy-features" >}}) section for a more detailed description. + +This guide explains how to deploy NGINX App Protect WAF as well as upgrade App Protect and the App Protect signature sets.
      + +### Using NGINX App Protect with NGINX Instance Manager + +NGINX Instance Manager provides centralized configuration management and visibility for your NGINX App Protect WAF fleet. +After completing the NGINX App Protect WAF installation, refer to the [NGINX Instance Manager Installation Guide](https://docs.nginx.com/nginx-instance-manager/deploy/) for the deployment instructions.
      + +## Prerequisites + +NGINX App Protect WAF is available to customers as a downloadable dynamic module at an additional cost. To purchase or add NGINX App Protect WAF to an existing NGINX Plus subscription, contact the NGINX sales team. + +NGINX Plus Release 22 and later supports NGINX App Protect WAF. + +NGINX App Protect WAF supports the following operating systems: + +- [CentOS/RHEL 7.4.x and above](#centos-74-installation) - (Deprecated starting from release 4.11) +- [RHEL 8.1.x and above](#rhel-81-installation) +- [RHEL 9 and above](#rhel-9-installation) +- [Oracle Linux 8.1.x and above](#oracle-linux-81-installation) +- [Amazon Linux 2](#amazon-linux-2-lts-installation) - (Deprecated starting from release 4.11) +- [Amazon Linux 2023](#amazon-linux-2023-installation) +- [Debian 10 (Buster)](#debian-10--debian-11--debian-12-installation) - (Deprecated starting from NGINX Plus R28) +- [Debian 11 (Bullseye)](#debian-10--debian-11--debian-12-installation) +- [Debian 12 (Bookworm)](#debian-10--debian-11--debian-12-installation) +- [Ubuntu 18.04 (Bionic)](#ubuntu-1804--ubuntu-2004--ubuntu-2204--ubuntu-2404-installation) - (Deprecated starting from NGINX Plus R30) +- [Ubuntu 20.04 (Focal)](#ubuntu-1804--ubuntu-2004--ubuntu-2204--ubuntu-2404-installation) +- [Ubuntu 22.04 (Jammy)](#ubuntu-1804--ubuntu-2004--ubuntu-2204--ubuntu-2404-installation) +- [Ubuntu 24.04 (Noble)](#ubuntu-1804--ubuntu-2004--ubuntu-2204--ubuntu-2404-installation) +- [Alpine 3.16](#alpine-316--alpine-317-installation) - (Deprecated starting from NGINX Plus R33) +- [Alpine 3.17](#alpine-316--alpine-317-installation) + + +The NGINX App Protect WAF package has the following dependencies: + +1. **nginx-plus-module-appprotect** - NGINX Plus dynamic module for App Protect +2. **app-protect-engine** - The App Protect enforcement engine +3. **app-protect-plugin** - The App Protect connector API between the engine and the NGINX Plus dynamic module +4. 
**app-protect-compiler** - The App Protect enforcement engine compiler agent
+5. **app-protect-common** - The App Protect shared libraries package
+6. **app-protect-geoip** - The App Protect geolocation update package
+7. **app-protect-graphql** - The App Protect shared library package for GraphQL protection
+8. **app-protect-attack-signatures** - The App Protect attack signatures update package
+9. **app-protect-threat-campaigns** - The App Protect threat campaigns update package
+10. **app-protect-bot-signatures** - The App Protect bot signatures update package
+11. **app-protect-selinux** - The prebuilt SELinux policy module for NGINX App Protect WAF (optional dependency)
+
+See the NGINX Plus full list of prerequisites for more details. NGINX App Protect WAF can be installed as a module to an existing NGINX Plus installation or as a complete NGINX Plus with App Protect installation in a clean environment.
+
+## Storage I/O Performance
+
+When deploying App Protect on NGINX Plus take into consideration the performance of storage on which it is going to be installed.
+The storage performance may greatly affect the time it takes NGINX Plus to reload whenever there is a configuration change, especially when NGINX App Protect WAF policies are being added or updated.
+In order to assess the storage performance you can use a tool called [fio](https://fio.readthedocs.io/en/latest/fio_doc.html). An example of usage follows (you may need to use `sudo`):
+
+```shell
+fio --filename=/opt/tst --size=100MB --direct=1 --rw=randrw --bs=4k --ioengine=libaio --iodepth=256 --runtime=120 --numjobs=4 --time_based --group_reporting --name=iops-test-job --eta-newline=1
+```
+
+The output fields relevant in this context are: `read: IOPS=` and `write: IOPS=`.
+Below is a table showing how many seconds it takes a reload to complete, when NGINX Plus is reloaded with an updated policy of an average size, in environments of varying I/O performance, and comparable CPU/memory specs: + +{{}} +|Read IOPS | Write IOPS | Reload Time| +| ---| ---| --- | +|675 | 678 | 18 | +|1575 | 1575 | 12 | +|13400 | 13400 | 8 | + +{{}} + +## Platform Security Considerations + +When deploying App Protect on NGINX Plus take the following precautions to secure the platform. This avoids the risk of causing a Denial of Service condition or compromising the platform security. + +- Restrict permissions to the files on the NGINX App Protect WAF platform to user **nginx** and group **nginx**, especially for the sensitive areas containing the configuration. +- Remove unnecessary remote access services on the platform. +- Configure a Syslog destination on the same machine as App Protect and proxy to an external destination. This avoids eavesdropping and [man-in-the-middle](https://en.wikipedia.org/wiki/Man-in-the-middle_attack) attacks on the Syslog channel. +- Regularly update the Operating System (OS) to avoid known OS vulnerabilities which may impact the service. 
+ + +## User Permissions + +If a user other than **nginx** is to be used, note the following: + +- If **nginx** user creation is disallowed on the platform, the following warning may be seen during installation: + + ```shell + warning: user nginx does not exist - using root + ``` + +- After first installation, upgrade, or security update installation, perform the following steps **before** starting/restarting/reloading NGINX: + + - Modify user permissions on all installed NGINX App Protect WAF files: + + ```shell + chown -R : /usr/share/ts /var/log/app_protect /opt/app_protect /etc/app_protect + ``` + + - Modify user of NGINX App Protect WAF processes: + + For service startup modify the **User** in the following files on your platform: + + `nginx-app-protect.service` + + For [docker deployment](#general-docker-deployment-instructions), modify the `entrypoint.sh` script to use the correct user instead of **nginx** when starting up the `bd-socket-plugin` process. + + +## CentOS 7.4+ Installation + +1. If you already have NGINX packages on your system, back up your configs and logs: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +2. Create the `/etc/ssl/nginx/` directory: + + ```shell + sudo mkdir -p /etc/ssl/nginx + ``` + +3. Log in to the [Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +4. Copy the above two files to the CentOS server's `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. + +5. Install prerequisite packages: + + ```shell + sudo yum install ca-certificates epel-release wget + ``` + +6. Remove any previously downloaded NGINX Plus repository file from `/etc/yum.repos.d`: + + ```shell + sudo rm /etc/yum.repos.d/nginx-plus-*.repo + ``` + +7. 
Add NGINX Plus repository by downloading the file `nginx-plus-7.4.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-7.4.repo + ``` + +8. Add NGINX App Protect WAF repository by downloading the file `app-protect-7.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-7.repo + ``` + +9. Install the most recent version of the NGINX App Protect WAF package (which includes NGINX Plus): + + ```shell + sudo yum install app-protect + ``` + + Alternatively, you can use the following command to list available versions: + + ```shell + sudo yum --showduplicates list app-protect + ``` + + Then, install a specific version from the output of command above. For example: + + ```shell + sudo yum install app-protect-24+3.639.0 + ``` + +10. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: + + ```shell + sudo nginx -v + ``` + +11. Load the NGINX App Protect WAF module on the main context in the `nginx.conf`: + + ```nginx + load_module modules/ngx_http_app_protect_module.so; + ``` + +12. Enable NGINX App Protect WAF on an `http/server/location` context in the `nginx.conf` file: + + ```nginx + app_protect_enable on; + ``` + +13. Optionally, install a prebuilt SELinux policy module for NGINX App Protect WAF (or configure SELinux as appropriate per your organization's security policies): + + ```shell + sudo yum install app-protect-selinux + ``` + + If you encounter any issues, check the [Troubleshooting Guide]({{< relref "/nap-waf/v4/troubleshooting-guide/troubleshooting#selinux" >}}). + +14. To enable the NGINX/App Protect WAF service start at boot, run the command: + + ```shell + sudo systemctl enable nginx.service + ``` + +15. Start the NGINX service: + + ```shell + sudo systemctl start nginx + ``` + +## RHEL 7.4+ Installation + +1. 
If you already have NGINX packages in your system, back up your configs and logs: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +2. Create the `/etc/ssl/nginx/` directory: + + ```shell + sudo mkdir -p /etc/ssl/nginx + ``` + +3. Log in to the [Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +4. Copy the above two files to the RHEL server's `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. + +5. Install prerequisite packages: + + ```shell + sudo yum install ca-certificates wget + ``` + +6. Remove any previously downloaded NGINX Plus repository file from `/etc/yum.repos.d`: + + ```shell + sudo rm /etc/yum.repos.d/nginx-plus-*.repo + ``` + +7. Add NGINX Plus repository by downloading the file `nginx-plus-7.4.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-7.4.repo + ``` + +8. Add NGINX App Protect WAF repository by downloading the file `app-protect-7.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-7.repo + ``` + +9. 
Enable Yum repositories to pull App Protect dependencies: + + Download the file `dependencies.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo + ``` + + If you have a RHEL subscription: + + ```shell + sudo yum-config-manager --enable rhui-REGION-rhel-server-optional rhui-REGION-rhel-server-releases rhel-7-server-optional-rpms + ``` + + If you don't have a RHEL subscription, you can pull the dependencies from the CentOS repository: + Create a new repository `centos.repo` in `/etc/yum.repos.d/` with the content: + + ```shell + [centos] + name=CentOS-7 + baseurl=http://ftp.heanet.ie/pub/centos/7/os/x86_64/ + enabled=1 + gpgcheck=1 + gpgkey=http://ftp.heanet.ie/pub/centos/7/os/x86_64/RPM-GPG-KEY-CentOS-7 + ``` + +10. Install the most recent version of the NGINX App Protect WAF package (which includes NGINX Plus): + + ```shell + sudo yum install app-protect + ``` + + Alternatively, you can use the following command to list available versions: + + ```shell + sudo yum --showduplicates list app-protect + ``` + + Then, install a specific version from the output of command above. For example: + + ```shell + sudo yum install app-protect-24+3.639.0 + ``` + +11. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: + + ```shell + sudo nginx -v + ``` + +12. Load the NGINX App Protect WAF module on the main context in the `nginx.conf`: + + ```nginx + load_module modules/ngx_http_app_protect_module.so; + ``` + +13. Enable NGINX App Protect WAF on an `http/server/location` context in the `nginx.conf` file: + + ```nginx + app_protect_enable on; + ``` + +14. 
Optionally, install a prebuilt SELinux policy module for NGINX App Protect WAF (or configure SELinux as appropriate per your organization's security policies): + + ```shell + sudo yum install app-protect-selinux + ``` + + If you encounter any issues, check the [Troubleshooting Guide]({{< relref "/nap-waf/v4/troubleshooting-guide/troubleshooting#selinux" >}}). + +15. To enable the NGINX/App Protect WAF service start at boot, run the command: + + ```shell + sudo systemctl enable nginx.service + ``` + +16. Start the NGINX service: + + ```shell + sudo systemctl start nginx + ``` + +## RHEL 8.1+ Installation + +1. If you already have NGINX packages in your system, back up your configs and logs: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +2. Create the `/etc/ssl/nginx/` directory: + + ```shell + sudo mkdir -p /etc/ssl/nginx + ``` + +3. Log in to the [Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +4. Copy the above two files to the RHEL server's `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. + +5. Install prerequisite packages: + + ```shell + sudo dnf install ca-certificates wget + ``` + +6. Remove any previously downloaded NGINX Plus repository file from `/etc/yum.repos.d`: + + ```shell + sudo rm /etc/yum.repos.d/nginx-plus-8.repo + ``` + +7. Add NGINX Plus repository by downloading the file `nginx-plus-8.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-8.repo + ``` + +8. Add NGINX App Protect WAF repository by downloading the file `app-protect-8.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-8.repo + ``` + +9. 
Enable Yum repositories to pull App Protect dependencies: + + Download the file `dependencies.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo + ``` + + Enable `codeready-builder` repository through subscription manager: + + ```shell + sudo subscription-manager repos --enable codeready-builder-for-rhel-8-x86_64-rpms + ``` + +10. Install the most recent version of the NGINX App Protect WAF package (which includes NGINX Plus): + + ```shell + sudo dnf install app-protect + ``` + + Alternatively, you can use the following command to list available versions: + + ```shell + sudo dnf --showduplicates list app-protect + ``` + + Then, install a specific version from the output of command above. For example: + + ```shell + sudo dnf install app-protect-31+4.641.0 + ``` + +11. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: + + ```shell + sudo nginx -v + ``` + +12. Load the NGINX App Protect WAF module on the main context in the `nginx.conf`: + + ```nginx + load_module modules/ngx_http_app_protect_module.so; + ``` + +13. Enable NGINX App Protect WAF on an `http/server/location` context in the `nginx.conf` file: + + ```nginx + app_protect_enable on; + ``` + +14. Optionally, install a prebuilt SELinux policy module for NGINX App Protect WAF (or configure SELinux as appropriate per your organization's security policies): + + ```shell + sudo dnf install app-protect-selinux + ``` + + If you encounter any issues, check the [Troubleshooting Guide]({{< relref "/nap-waf/v4/troubleshooting-guide/troubleshooting#selinux" >}}). + +15. To enable the NGINX/App Protect WAF service start at boot, run the command: + + ```shell + sudo systemctl enable nginx.service + ``` + +16. Start the NGINX service: + + ```shell + sudo systemctl start nginx + ``` + +## RHEL 9+ Installation + +1. 
If you already have NGINX packages in your system, back up your configs and logs: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +2. Create the `/etc/ssl/nginx/` directory: + + ```shell + sudo mkdir -p /etc/ssl/nginx + ``` + +3. Log in to the [Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +4. Copy the above two files to the RHEL server's `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. + +5. Install prerequisite packages: + + ```shell + sudo dnf install ca-certificates wget + ``` + +6. Remove any previously downloaded NGINX Plus repository file from `/etc/yum.repos.d`: + + ```shell + sudo rm /etc/yum.repos.d/plus-*.repo + ``` + +7. Add NGINX Plus repository by downloading the file `plus-9.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/plus-9.repo + ``` + +8. Add NGINX App Protect WAF repository by downloading the file `app-protect-9.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-9.repo + ``` + +9. Enable Yum repositories to pull App Protect dependencies: + + Download the file `dependencies.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo + ``` + + Enable `codeready-builder` repository through subscription manager: + + ```shell + sudo subscription-manager repos --enable codeready-builder-for-rhel-9-x86_64-rpms + ``` + +10. 
Install the most recent version of the NGINX App Protect WAF package (which includes NGINX Plus): + + ```shell + sudo dnf install app-protect + ``` + + Alternatively, you can use the following command to list available versions: + + ```shell + sudo dnf --showduplicates list app-protect + ``` + + Then, install a specific version from the output of command above. For example: + + ```shell + sudo dnf install app-protect-31+4.641.0 + ``` + +11. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: + + ```shell + sudo nginx -v + ``` + +12. Load the NGINX App Protect WAF module on the main context in the `nginx.conf`: + + ```nginx + load_module modules/ngx_http_app_protect_module.so; + ``` + +13. Enable NGINX App Protect WAF on an `http/server/location` context in the `nginx.conf` file: + + ```nginx + app_protect_enable on; + ``` + +14. Optionally, install a prebuilt SELinux policy module for NGINX App Protect WAF (or configure SELinux as appropriate per your organization's security policies): + + ```shell + sudo dnf install app-protect-selinux + ``` + + If you encounter any issues, check the [Troubleshooting Guide]({{< relref "/nap-waf/v4/troubleshooting-guide/troubleshooting#selinux" >}}). + +15. To enable the NGINX/App Protect WAF service start at boot, run the command: + + ```shell + sudo systemctl enable nginx.service + ``` + +16. Start the NGINX service: + + ```shell + sudo systemctl start nginx + ``` + +## Oracle Linux 8.1+ Installation + +1. If you already have NGINX packages in your system, back up your configs and logs: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +2. Create the `/etc/ssl/nginx/` directory: + + ```shell + sudo mkdir -p /etc/ssl/nginx + ``` + +3. Log in to the [Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +4. 
Copy the above two files to the Oracle Linux server's `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. + +5. Install prerequisite packages: + + ```shell + sudo dnf install ca-certificates wget yum-utils + ``` + +6. Remove any previously downloaded NGINX Plus repository file from `/etc/yum.repos.d`: + + ```shell + sudo rm /etc/yum.repos.d/nginx-plus-*.repo + ``` + +7. Add NGINX Plus repository by downloading the file `nginx-plus-8.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-8.repo + ``` + +8. Add NGINX App Protect WAF repository by downloading the file `app-protect-8.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-8.repo + ``` + +9. Enable Yum repositories to pull App Protect dependencies: + + Download the file `dependencies.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo + ``` + + Enable `ol8_codeready_builder` repository: + + ```shell + dnf config-manager --set-enabled ol8_codeready_builder + ``` + +10. Install the most recent version of the NGINX App Protect WAF package (which includes NGINX Plus): + + ```shell + sudo dnf install app-protect + ``` + + Alternatively, you can use the following command to list available versions: + + ```shell + sudo dnf --showduplicates list app-protect + ``` + + Then, install a specific version from the output of command above. For example: + + ```shell + sudo dnf install app-protect-26+3.890.0 + ``` + +11. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: + + ```shell + sudo nginx -v + ``` + +12. Load the NGINX App Protect WAF module on the main context in the `nginx.conf`: + + ```nginx + load_module modules/ngx_http_app_protect_module.so; + ``` + +13. 
Enable NGINX App Protect WAF on an `http/server/location` context in the `nginx.conf` file: + + ```nginx + app_protect_enable on; + ``` + +14. Optionally, install a prebuilt SELinux policy module for NGINX App Protect WAF (or configure SELinux as appropriate per your organization's security policies): + + ```shell + sudo dnf install app-protect-selinux + ``` + + If you encounter any issues, check the [Troubleshooting Guide]({{< relref "/nap-waf/v4/troubleshooting-guide/troubleshooting#selinux" >}}). + +15. To enable the NGINX/App Protect WAF service start at boot, run the command: + + ```shell + sudo systemctl enable nginx.service + ``` + +16. Start the NGINX service: + + ```shell + sudo systemctl start nginx + ``` + +## Amazon Linux 2 LTS Installation + +1. If you already have NGINX packages in your system, back up your configs and logs: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +2. Create the `/etc/ssl/nginx/` directory: + + ```shell + sudo mkdir -p /etc/ssl/nginx + ``` + +3. Log in to the [Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +4. Copy the above two files to the Amazon Linux server's `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. + +5. Install prerequisite packages: + + ```shell + sudo amazon-linux-extras enable epel + sudo yum clean metadata + sudo yum install ca-certificates epel-release wget + ``` + +6. Remove any previously downloaded NGINX Plus repository file from `/etc/yum.repos.d`: + + ```shell + sudo rm /etc/yum.repos.d/nginx-plus-7.repo + ``` + +7. Add NGINX Plus repository by downloading the file `nginx-plus-7.4.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-7.4.repo + ``` + +8. 
Add NGINX App Protect WAF repository by downloading the file `app-protect-7.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-7.repo + ``` + +9. Install the most recent version of the NGINX App Protect WAF package (which includes NGINX Plus): + + ```shell + sudo yum install app-protect + ``` + + Alternatively, you can use the following command to list available versions: + + ```shell + sudo yum --showduplicates list app-protect + ``` + + Then, install a specific version from the output of the command above. For example: + + ```shell + sudo yum install app-protect-24+3.639.0 + ``` + +10. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: + + ```shell + sudo nginx -v + ``` + +11. Load the NGINX App Protect WAF module on the main context in the `nginx.conf`: + + ```nginx + load_module modules/ngx_http_app_protect_module.so; + ``` + +12. Enable NGINX App Protect WAF on an `http/server/location` context in the `nginx.conf` file: + + ```nginx + app_protect_enable on; + ``` + +13. Optionally, install a prebuilt SELinux policy module for NGINX App Protect WAF (or configure SELinux as appropriate per your organization's security policies): + + ```shell + sudo yum install app-protect-selinux + ``` + + If you encounter any issues, check the [Troubleshooting Guide]({{< relref "/nap-waf/v4/troubleshooting-guide/troubleshooting#selinux" >}}). + +14. To enable the NGINX/App Protect WAF service start at boot, run the command: + + ```shell + sudo systemctl enable nginx.service + ``` + +15. Start the NGINX service: + + ```shell + sudo systemctl start nginx + ``` + +--- + +## Amazon Linux 2023 Installation + +1. If you already have NGINX packages in your system, back up your configuration and log files: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +1. 
Create the `/etc/ssl/nginx/` directory: + + ```shell + sudo mkdir -p /etc/ssl/nginx + ``` + +1. Log into [MyF5](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +1. Copy `nginx-repo.key` and `nginx-repo.crt` to the RHEL server's `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. + +1. Install prerequisite packages: + + ```shell + sudo dnf install ca-certificates wget + ``` + +1. Remove any previously downloaded NGINX Plus repository files from `/etc/yum.repos.d`: + + ```shell + sudo rm /etc/yum.repos.d/plus-*.repo + ``` + +1. Add the NGINX Plus repository by downloading the file `plus-amazonlinux2023.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/plus-amazonlinux2023.repo + ``` + +1. Add the NGINX App Protect WAF repository by downloading the file `app-protect-amazonlinux2023.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-amazonlinux2023.repo + ``` + +1. Enable Yum repositories to pull App Protect dependencies: + + Download the file `dependencies.amazonlinux2023.repo` to `/etc/yum.repos.d`: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.amazonlinux2023.repo + ``` + +1. Install the most recent version of the NGINX App Protect WAF package (which includes NGINX Plus): + + ```shell + sudo dnf install app-protect + ``` + + Alternatively, you can use the following command to list available versions: + + ```shell + sudo dnf --showduplicates list app-protect + ``` + + Then, install a specific version from the output of command above. For example: + + ```shell + sudo dnf install app-protect-31+4.641.0 + ``` + +1. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: + + ```shell + sudo nginx -v + ``` + +1. 
Load the NGINX App Protect WAF module on the main context in the `nginx.conf`: + + ```nginx + load_module modules/ngx_http_app_protect_module.so; + ``` + +1. Enable NGINX App Protect WAF on an `http/server/location` context in the `nginx.conf` file: + + ```nginx + app_protect_enable on; + ``` + +1. Optionally, install a prebuilt SELinux policy module for NGINX App Protect WAF (or configure SELinux as appropriate per your organization's security policies): + + ```shell + sudo dnf install app-protect-selinux + ``` + + If you encounter any issues, check the [Troubleshooting Guide]({{< relref "/nap-waf/v4/troubleshooting-guide/troubleshooting#selinux" >}}). + +1. To enable the NGINX/App Protect WAF service start at boot, run the command: + + ```shell + sudo systemctl enable nginx.service + ``` + +1. Start the NGINX service: + + ```shell + sudo systemctl start nginx + ``` + +## Debian 10 / Debian 11 / Debian 12 Installation + +1. If you already have NGINX packages in your system, back up your configs and logs: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +2. Create the `/etc/ssl/nginx/` directory and change the directory to the SSL certificate directory after creating the folder: + + ```shell + sudo mkdir -p /etc/ssl/nginx + cd /etc/ssl/nginx + ``` + +3. Log in to the [Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +4. Copy the above two files to the Debian server's `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. + +5. Install prerequisite packages: + + ```shell + sudo apt-get update && sudo apt-get install apt-transport-https lsb-release ca-certificates wget gnupg2 + ``` + + {{< note >}} In case the apt installation or database update fails due to release info change, run the below command before you install. 
{{< /note >}} + + ```shell + sudo apt-get update --allow-releaseinfo-change + ``` + +6. Download and add the NGINX signing keys: + + ```shell + wget -qO - https://cs.nginx.com/static/keys/nginx_signing.key | gpg --dearmor | \ + sudo tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null + + wget -qO - https://cs.nginx.com/static/keys/app-protect-security-updates.key | gpg --dearmor | \ + sudo tee /usr/share/keyrings/app-protect-security-updates.gpg >/dev/null + ``` + +7. Remove any previous NGINX Plus repository and apt configuration files: + + ```shell + sudo rm /etc/apt/sources.list.d/nginx-plus.list + sudo rm /etc/apt/sources.list.d/*app-protect*.list + sudo rm /etc/apt/apt.conf.d/90pkgs-nginx + ``` + +8. Add NGINX Plus repository: + + ```shell + printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ + https://pkgs.nginx.com/plus/debian `lsb_release -cs` nginx-plus\n" | \ + sudo tee /etc/apt/sources.list.d/nginx-plus.list + ``` + +9. Add NGINX App Protect WAF repositories: + + ```shell + printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ + https://pkgs.nginx.com/app-protect/debian `lsb_release -cs` nginx-plus\n" | \ + sudo tee /etc/apt/sources.list.d/nginx-app-protect.list + + printf "deb [signed-by=/usr/share/keyrings/app-protect-security-updates.gpg] \ + https://pkgs.nginx.com/app-protect-security-updates/debian `lsb_release -cs` nginx-plus\n" | \ + sudo tee /etc/apt/sources.list.d/app-protect-security-updates.list + ``` + +10. Download the apt configuration to `/etc/apt/apt.conf.d`: + + ```shell + sudo wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx + ``` + +11. 
Update the repository and install the most recent version of the NGINX App Protect WAF package (which includes NGINX Plus): + + ```shell + sudo apt-get update + sudo apt-get install app-protect + ``` + + Alternatively, to install a specific version, use the following commands to update and list available versions: + + ```shell + sudo apt-get update + sudo apt-cache policy app-protect + ``` + + {{< note >}} When installing an older version of NGINX App Protect WAF, the dependent packages have to be installed manually, as shown in the command above. The following script can be used to find out the dependent packages for a specific version of NGINX App Protect WAF.{{< /note >}} + + ```shell + findDeps () { local pkgs=$(apt show $1 2>/dev/null | grep Depends: | grep -oE "(nginx-plus-module|app-protect)-[a-z]+ *\(= *[0-9\+\.-]+~`lsb_release -cs`\)" | tr -d ' ()'); for p in ${pkgs[@]}; do echo $p; findDeps $p; done; } + findDeps app-protect=24+3.639.0-1~[OS_CODENAME] + ``` + + Finally, install a specific version from the output of command above. For example: + + ```shell + sudo apt-get install -y app-protect-compiler=8.7.4-1~[OS_CODENAME] \ + app-protect-plugin=3.639.0-1~[OS_CODENAME] \ + nginx-plus-module-appprotect=24+3.639.0-1~[OS_CODENAME]\ + app-protect-engine=8.7.4-1~[OS_CODENAME] \ + app-protect=24+3.639.0-1~[OS_CODENAME] \ + app-protect-common=8.7.4-1~[OS_CODENAME] + ``` + + Replace the [OS_CODENAME] in the above example with: **buster** for Debian 10, **bullseye** for Debian 11 and **bookworm** for Debian 12. + +12. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: + + ```shell + sudo nginx -v + ``` + +13. Load the NGINX App Protect WAF module on the main context in the `nginx.conf` file: + + ```nginx + load_module modules/ngx_http_app_protect_module.so; + ``` + +14. Enable NGINX App Protect WAF on an `http/server/location` context in the `nginx.conf` via: + + ```nginx + app_protect_enable on; + ``` + +15. 
Start the NGINX service: + + ```shell + sudo systemctl start nginx + ``` + +{{< note >}} Debian 10 / Debian 11 / Debian 12 activates **AppArmor** by default, but NGINX App Protect WAF will run in unconfined mode after being installed as it is shipped with no AppArmor profile. To benefit from AppArmor access control capabilities for NGINX App Protect WAF, you will have to write your own AppArmor profile for NGINX App Protect WAF executables found in `/opt/app_protect/bin` such that it best suits your environment. +{{< /note >}} + + +## Ubuntu 18.04 / Ubuntu 20.04 / Ubuntu 22.04 / Ubuntu 24.04 Installation + +1. If you already have NGINX packages in your system, back up your configs and logs: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +2. Create the `/etc/ssl/nginx/` directory: + + ```shell + sudo mkdir -p /etc/ssl/nginx + ``` + +3. Log in to the [Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +4. Copy the above two files to the Ubuntu server's `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. + +5. Install prerequisite packages: + + ```shell + sudo apt-get update && sudo apt-get install apt-transport-https lsb-release ca-certificates wget gnupg2 + ``` + +6. Download and add the NGINX signing keys: + + ```shell + wget -qO - https://cs.nginx.com/static/keys/nginx_signing.key | \ + gpg --dearmor | sudo tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null + + wget -qO - https://cs.nginx.com/static/keys/app-protect-security-updates.key | \ + gpg --dearmor | sudo tee /usr/share/keyrings/app-protect-security-updates.gpg >/dev/null + ``` + +7. 
Remove any previous NGINX Plus repository and apt configuration files: + + ```shell + sudo rm /etc/apt/sources.list.d/nginx-plus.list + sudo rm /etc/apt/sources.list.d/*app-protect*.list + sudo rm /etc/apt/apt.conf.d/90pkgs-nginx + ``` + +8. Add NGINX Plus repository: + + ```shell + printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ + https://pkgs.nginx.com/plus/ubuntu `lsb_release -cs` nginx-plus\n" | \ + sudo tee /etc/apt/sources.list.d/nginx-plus.list + ``` + +9. Add NGINX App Protect WAF repositories: + + ```shell + printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ + https://pkgs.nginx.com/app-protect/ubuntu `lsb_release -cs` nginx-plus\n" | \ + sudo tee /etc/apt/sources.list.d/nginx-app-protect.list + + printf "deb [signed-by=/usr/share/keyrings/app-protect-security-updates.gpg] \ + https://pkgs.nginx.com/app-protect-security-updates/ubuntu `lsb_release -cs` nginx-plus\n" | \ + sudo tee /etc/apt/sources.list.d/app-protect-security-updates.list + ``` + +10. Download the apt configuration to `/etc/apt/apt.conf.d`: + + ```shell + sudo wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx + ``` + +11. Update the repository and install the most recent version of the NGINX App Protect WAF package (which includes NGINX Plus): + + ```shell + sudo apt-get update + sudo apt-get install app-protect + ``` + + Alternatively, to install a specific version, use the following commands to update and list available versions: + + ```shell + sudo apt-get update + sudo apt-cache policy app-protect + ``` + + Finally, install a specific version from the output of command above. 
For example:
+
+    ```shell
+    sudo apt-get install -y app-protect-compiler=5.9.4-1~[OS_CODENAME] \
+    app-protect-plugin=3.263.0-1~[OS_CODENAME] \
+    nginx-plus-module-appprotect=23+3.263.0-1~[OS_CODENAME] \
+    app-protect-engine=5.9.4-1~[OS_CODENAME] \
+    app-protect=23+3.263.0-1~[OS_CODENAME]
+    ```
+
+    Replace the [OS_CODENAME] in the above example with: **bionic** for Ubuntu 18.04, **focal** for Ubuntu 20.04, **jammy** for Ubuntu 22.04 and **noble** for Ubuntu 24.04.
+
      +
      + + {{< note >}} When installing an older version of NGINX App Protect WAF, the dependent packages have to be installed manually, as shown in the command above. The following script can be used to find out the dependent packages for a specific version of NGINX App Protect WAF.{{< /note >}} + + ```shell + findDeps () { local pkgs=$(apt show $1 2>/dev/null | grep Depends: | grep -oE "(nginx-plus-module|app-protect)-[a-z]+ *\(= *[0-9\+\.-]+~`lsb_release -cs`\)" | tr -d ' ()'); for p in ${pkgs[@]}; do echo $p; findDeps $p; done; } + findDeps app-protect=23+3.263.0-1~[OS_CODENAME] + ``` + +12. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: + + ```shell + sudo nginx -v + ``` + +13. Load the NGINX App Protect WAF module on the main context in the `nginx.conf` file: + + ```nginx + load_module modules/ngx_http_app_protect_module.so; + ``` + +14. Enable NGINX App Protect WAF on an `http/server/location` context in the `nginx.conf` via: + + ```nginx + app_protect_enable on; + ``` + +15. Start the NGINX service: + + ```shell + sudo systemctl start nginx + ``` + +{{< note >}} Ubuntu 20.04 / Ubuntu 22.04 / Ubuntu 24.04 activates **AppArmor** by default, but NGINX App Protect WAF will run in unconfined mode after being installed as it is shipped with no AppArmor profile. To benefit from AppArmor access control capabilities for NGINX App Protect WAF, you will have to write your own AppArmor profile for NGINX App Protect WAF executables found in `/opt/app_protect/bin` such that it best suits your environment. +{{< /note >}} + + +## Alpine 3.16 / Alpine 3.17 Installation + +1. If you already have NGINX packages in your system, back up your configs and logs: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +2. Log in to the [Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +3. 
Upload `nginx-repo.key` to `/etc/apk/cert.key` and `nginx-repo.crt` to `/etc/apk/cert.pem`. Make sure that files do not contain other certificates and keys, as Alpine Linux does not support mixing client certificates for different repositories.
+
+4. Add the NGINX public signing key to the directory `/etc/apk/keys`:
+
+    ```shell
+    sudo wget -O /etc/apk/keys/nginx_signing.rsa.pub https://cs.nginx.com/static/keys/nginx_signing.rsa.pub
+
+    sudo wget -O /etc/apk/keys/app-protect-security-updates.rsa.pub https://cs.nginx.com/static/keys/app-protect-security-updates.rsa.pub
+    ```
+
+5. Remove any previously configured NGINX Plus repository:
+
+    ```shell
+    sudo sed -i "/plus-pkgs.nginx.com/d" /etc/apk/repositories
+    ```
+
+6. Add the NGINX Plus repository to `/etc/apk/repositories` file:
+
+    ```shell
+    printf "https://pkgs.nginx.com/plus/alpine/v`egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release`/main\n" | sudo tee -a /etc/apk/repositories
+    ```
+
+7. Add the NGINX App Protect WAF repository to `/etc/apk/repositories` file:
+
+    ```shell
+    printf "https://pkgs.nginx.com/app-protect/alpine/v`egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release`/main\n" | sudo tee -a /etc/apk/repositories
+
+    printf "https://pkgs.nginx.com/app-protect-security-updates/alpine/v`egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release`/main\n" | sudo tee -a /etc/apk/repositories
+    ```
+
+8. We recommend removing all community-supported NGINX packages. Note that all NGINX modules will be removed as well.
+
+    ```shell
+    sudo apk del -r app-protect
+    sudo apk del -r nginx
+    ```
+
+9. 
Update the repository and install the most recent version of the NGINX Plus and NGINX App Protect WAF: + + ```shell + sudo apk update + sudo apk add app-protect + ``` + + Alternatively, use the following commands to install the most recent version of NGINX App Protect WAF for NGINX Plus R28: + + ```shell + sudo apk update + sudo apk add app-protect + ``` + + Alternatively, use the following commands to list available versions: + + ```shell + sudo apk update + sudo apk info app-protect + ``` + + Finally, install a specific version from the output of command above. For example: + + ```shell + sudo apk add app-protect=30.4.457.0-r1 + ``` + +10. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: + + ```shell + sudo nginx -v + ``` + +11. Load the NGINX App Protect WAF module on the main context in the `nginx.conf` file: + + ```nginx + load_module modules/ngx_http_app_protect_module.so; + ``` + +12. Enable NGINX App Protect WAF on an `http/server/location` context in the `nginx.conf` via: + + ```nginx + app_protect_enable on; + ``` + +13. Start the App Protect and NGINX services: + + ```shell + sudo service nginx-app-protect start + sudo service nginx start + ``` + + +## Docker Deployment + +### General Docker Deployment Instructions + +1. Create a Dockerfile (see examples below) which copies the following files into the docker image: + + - `nginx.conf`: User defined nginx.conf with app-protect enabled + - `entrypoint.sh`: Docker startup script which spins up all App Protect processes, must have executable permissions + - `custom_log_format.json`: Optional user-defined security log format file (if not used - remove its references from the `nginx.conf` and Dockerfile) + +2. Log in to the [Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +3. Copy the files to the directory where the Dockerfile is located. + +4. 
Optionally, create `custom_log_format.json` in the same directory, for example: + + ```json + { + "filter": { + "request_type": "all" + }, + "content": { + "format": "splunk", + "max_request_size": "any", + "max_message_size": "10k" + } + } + ``` + +5. In the same directory create an `nginx.conf` file with the following contents: + + ```nginx + user nginx; + + worker_processes auto; + load_module modules/ngx_http_app_protect_module.so; + + error_log /var/log/nginx/error.log debug; + + events { + worker_connections 10240; + } + + http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + sendfile on; + keepalive_timeout 65; + + upstream app_backend_com { + server 192.168.0.1:8000; + server 192.168.0.1:8001; + } + server { + listen 80; + server_name app.example.com; + proxy_http_version 1.1; + + app_protect_enable on; + app_protect_security_log_enable on; + app_protect_security_log "/etc/nginx/custom_log_format.json" syslog:server=127.0.0.1:514; + + location / { + client_max_body_size 0; + default_type text/html; + # set your backend here + proxy_pass http://app_backend_com; + proxy_set_header Host $host; + } + } + } + ``` + + {{< important >}}Make sure to replace upstream and proxy pass directives in this example with relevant application backend settings.{{< /important >}} +6. In the same directory create an `entrypoint.sh` file with the following content: + + ```shell + #!/bin/sh + + /bin/su -s /bin/sh -c "/usr/share/ts/bin/bd-socket-plugin tmm_count 4 proc_cpuinfo_cpu_mhz 2000000 total_xml_memory 307200000 total_umu_max_size 3129344 sys_max_account_id 1024 no_static_config 2>&1 >> /var/log/app_protect/bd-socket-plugin.log &" nginx + /usr/sbin/nginx -g 'daemon off;' + ``` + +7. Create a Docker image: + + - For CentOS/Oracle Linux/Debian/Ubuntu/Alpine/Amazon Linux: + + ```shell + DOCKER_BUILDKIT=1 docker build --no-cache --secret id=nginx-crt,src=nginx-repo.crt --secret id=nginx-key,src=nginx-repo.key -t app-protect . 
+ ``` + + The `DOCKER_BUILDKIT=1` enables `docker build` to recognize the `--secret` flag which allows the user to pass secret information to be used in the Dockerfile for building docker images in a safe way that will not end up stored in the final image. This is a recommended practice for the handling of the certificate and private key for NGINX repository access (`nginx-repo.crt` and `nginx-repo.key` files). More information [here](https://docs.docker.com/engine/reference/commandline/buildx_build/#secret). + + - For RHEL: + + ```shell + podman build --no-cache --secret id=nginx-crt,src=nginx-repo.crt --secret id=nginx-key,src=nginx-repo.key -t app-protect . + ``` + + **Notes:** + - The `--no-cache` option tells Docker/Podman to build the image from scratch and ensures the installation of the latest version of NGINX Plus and NGINX App Protect WAF 4.x. If the Dockerfile was previously used to build an image without the `--no-cache` option, the new image uses versions from the previously built image from the cache. + - For RHEL:
      + The subscription-manager is disabled when running inside containers based on Red Hat Universal Base images. You will need a registered and subscribed RHEL system. + +8. Verify that the app-protect image was created successfully with the docker images command: + + ```shell + docker images app-protect + ``` + +9. Create a container based on this image, for example, my-app-protect container: + + ```shell + docker run --name my-app-protect -p 80:80 -d app-protect + ``` + +10. Verify that the my-app-protect container is up and running with the `docker ps` command: + + ```shell + docker ps + ``` + +### CentOS 7.4 Docker Deployment Example + +```dockerfile +# syntax=docker/dockerfile:1 +# For CentOS 7: +FROM centos:7.4.1708 + +# Install prerequisite packages: +RUN yum -y install wget ca-certificates epel-release + +# Add NGINX Plus repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-7.4.repo + +# Add NGINX App-protect repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-7.repo + +# Install NGINX App Protect WAF: +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + yum -y install app-protect \ + && yum clean all \ + && rm -rf /var/cache/yum + +# Forward request logs to Docker log collector: +RUN ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log + +# Copy configuration files: +COPY nginx.conf custom_log_format.json /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD ["sh", "/root/entrypoint.sh"] +``` + +### RHEL UBI7 Docker Deployment Example + +```dockerfile +# syntax=docker/dockerfile:1 +# For RHEL ubi7: +FROM registry.access.redhat.com/ubi7/ubi + +# Install prerequisite packages: +RUN yum -y install wget ca-certificates + +# Add NGINX Plus repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-7.4.repo + +# 
Add NGINX App-protect & dependencies repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-7.repo +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo \ + # You can use either of the dependencies or epel repo + # && rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm \ + && yum clean all + +# Install NGINX App Protect WAF: +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + yum install --enablerepo=rhel-7-server-extras-rpms --enablerepo=rhel-7-server-optional-rpms --enablerepo=rhel-7-server-rpms -y app-protect \ + && yum clean all \ + && rm -rf /var/cache/yum + +# Forward request logs to Docker log collector: +RUN ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log + +# Copy configuration files: +COPY nginx.conf custom_log_format.json /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD ["sh", "/root/entrypoint.sh"] +``` + +### RHEL UBI8 Docker Deployment Example + +```dockerfile +# syntax=docker/dockerfile:1 +# For RHEL ubi8: +FROM registry.access.redhat.com/ubi8/ubi + +# Install prerequisite packages: +RUN dnf -y install wget ca-certificates + +# Add NGINX Plus repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-8.repo + +# Add NGINX App-protect & dependencies repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-8.repo +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo \ + # You can use either of the dependencies or epel repo + # && rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm \ + && dnf clean all + +# Install NGINX App Protect WAF: +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + 
--mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + dnf install --enablerepo=codeready-builder-for-rhel-8-x86_64-rpms -y app-protect \ + && dnf clean all \ + && rm -rf /var/cache/dnf + +# Forward request logs to Docker log collector: +RUN ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log + +# Copy configuration files: +COPY nginx.conf custom_log_format.json /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD ["sh", "/root/entrypoint.sh"] +``` + +### RHEL UBI9 Docker Deployment Example + +```dockerfile +# syntax=docker/dockerfile:1 +# For RHEL ubi9: +FROM registry.access.redhat.com/ubi9/ubi + +# Install prerequisite packages: +RUN dnf -y install wget ca-certificates + +# Add NGINX Plus repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/plus-9.repo + +# Add NGINX App-protect & dependencies repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-9.repo +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo \ + # You can use either of the dependencies or epel repo + # && rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm \ + && dnf clean all + +# Install NGINX App Protect WAF: +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + dnf install --enablerepo=codeready-builder-for-rhel-9-x86_64-rpms -y app-protect \ + && dnf clean all \ + && rm -rf /var/cache/dnf + +# Forward request logs to Docker log collector: +RUN ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log + +# Copy configuration files: +COPY nginx.conf custom_log_format.json /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD ["sh", "/root/entrypoint.sh"] +``` + +### Oracle Linux 8 Docker Deployment Example + +```dockerfile +# syntax=docker/dockerfile:1 +# For Oracle Linux 8: +FROM 
oraclelinux:8 + +# Install prerequisite packages: +RUN dnf -y install wget ca-certificates yum-utils + +# Add NGINX Plus repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-8.repo + +# Add NGINX App-protect repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-8.repo + +# Enable Yum repositories to pull App Protect dependencies: +RUN dnf config-manager --set-enabled ol8_codeready_builder \ + && wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo \ + # You can use either of the dependencies or epel repo + # && rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm \ + && dnf clean all + +# Install NGINX App Protect WAF: +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + dnf -y install app-protect \ + && dnf clean all \ + && rm -rf /var/cache/dnf + +# Forward request logs to Docker log collector: +RUN ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log + +# Copy configuration files: +COPY nginx.conf custom_log_format.json /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD ["sh", "/root/entrypoint.sh"] +``` + +### Amazon Linux 2 Docker Deployment Example + +```dockerfile +# syntax=docker/dockerfile:1 +# For Amazon Linux 2: +FROM amazonlinux:2 + +# Install prerequisite packages: +RUN amazon-linux-extras enable epel +RUN yum clean metadata +RUN yum -y install wget ca-certificates epel-release shadow-utils + +# Add NGINX Plus repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-7.4.repo + +# Add NGINX App-protect repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-7.repo + +# Install NGINX App Protect WAF: +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + 
--mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + yum -y install app-protect \ + && yum clean all \ + && rm -rf /var/cache/yum + +# Forward request logs to Docker log collector: +RUN ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log + +# Copy configuration files: +COPY nginx.conf custom_log_format.json /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD ["sh", "/root/entrypoint.sh"] +``` + +### Amazon Linux 2023 Docker Deployment Example + +```dockerfile +# syntax=docker/dockerfile:1 +# For Amazon Linux 2023: +FROM amazonlinux:2023 + +# Install prerequisite packages: +RUN dnf -y install wget ca-certificates + +# Add NGINX Plus repo: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/plus-amazonlinux2023.repo + +# Add NAP dependencies repo: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.amazonlinux2023.repo + +# Add NGINX App-protect repo: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-amazonlinux2023.repo + +# Install NGINX App Protect WAF: +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + dnf -y install app-protect \ + && dnf clean all \ + && rm -rf /var/cache/yum + +# Forward request logs to Docker log collector: +RUN ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log + +# Copy configuration files: +COPY nginx.conf custom_log_format.json /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD ["sh", "/root/entrypoint.sh"] +``` + +### Debian 10 (Buster) / 11 (Bullseye) / 12 (Bookworm) Docker Deployment Example + +```dockerfile +ARG OS_CODENAME +# Where OS_CODENAME can be: buster/bullseye/bookworm +# syntax=docker/dockerfile:1 +# For Debian 10 / 11 / 12: +FROM debian:${OS_CODENAME} + +# Install prerequisite packages: +RUN apt-get update && apt-get install -y 
apt-transport-https lsb-release ca-certificates wget gnupg2 + +# Download and add the NGINX signing keys: +RUN wget -qO - https://cs.nginx.com/static/keys/nginx_signing.key | \ + gpg --dearmor | tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null +RUN wget -qO - https://cs.nginx.com/static/keys/app-protect-security-updates.key | \ + gpg --dearmor | tee /usr/share/keyrings/app-protect-security-updates.gpg >/dev/null + +# Add NGINX Plus repository: +RUN printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ + https://pkgs.nginx.com/plus/debian `lsb_release -cs` nginx-plus\n" | \ + tee /etc/apt/sources.list.d/nginx-plus.list + +# Add NGINX App Protect WAF repositories: +RUN printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ + https://pkgs.nginx.com/app-protect/debian `lsb_release -cs` nginx-plus\n" | \ + tee /etc/apt/sources.list.d/nginx-app-protect.list +RUN printf "deb [signed-by=/usr/share/keyrings/app-protect-security-updates.gpg] \ + https://pkgs.nginx.com/app-protect-security-updates/debian `lsb_release -cs` nginx-plus\n" | \ + tee /etc/apt/sources.list.d/app-protect-security-updates.list + +# Download the apt configuration to `/etc/apt/apt.conf.d`: +RUN wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx + +# Update the repository and install the most recent version of the NGINX App Protect WAF package (which includes NGINX Plus): +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + apt-get update && apt-get install -y app-protect + +# Forward request logs to Docker log collector: +RUN ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log + +# Copy configuration files: +COPY nginx.conf custom_log_format.json /etc/nginx/ +COPY entrypoint.sh /root/ + +CMD ["sh", "/root/entrypoint.sh"] +``` + + +### Ubuntu 18.04 (Bionic) / 20.04 (Focal) / 
22.04 (Jammy) / 24.04 (Noble) Docker Deployment Example + +```dockerfile +ARG OS_CODENAME +# Where OS_CODENAME can be: bionic/focal/jammy/noble +# syntax=docker/dockerfile:1 +# For Ubuntu 18.04 / 20.04 /22.04 / 24.04: +FROM ubuntu:${OS_CODENAME} + +# Install prerequisite packages: +RUN apt-get update && apt-get install -y apt-transport-https lsb-release ca-certificates wget gnupg2 + +# Download and add the NGINX signing keys: +RUN wget -qO - https://cs.nginx.com/static/keys/nginx_signing.key | \ + gpg --dearmor | tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null +RUN wget -qO - https://cs.nginx.com/static/keys/app-protect-security-updates.key | \ + gpg --dearmor | tee /usr/share/keyrings/app-protect-security-updates.gpg >/dev/null + +# Add NGINX Plus repository: +RUN printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ + https://pkgs.nginx.com/plus/ubuntu `lsb_release -cs` nginx-plus\n" | \ + tee /etc/apt/sources.list.d/nginx-plus.list + +# Add NGINX App Protect WAF repositories: +RUN printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ + https://pkgs.nginx.com/app-protect/ubuntu `lsb_release -cs` nginx-plus\n" | \ + tee /etc/apt/sources.list.d/nginx-app-protect.list +RUN printf "deb [signed-by=/usr/share/keyrings/app-protect-security-updates.gpg] \ + https://pkgs.nginx.com/app-protect-security-updates/ubuntu `lsb_release -cs` nginx-plus\n" | \ + tee /etc/apt/sources.list.d/app-protect-security-updates.list + +# Download the apt configuration to `/etc/apt/apt.conf.d`: +RUN wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx + +# Update the repository and install the most recent version of the NGINX App Protect WAF package (which includes NGINX Plus): +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + apt-get update && DEBIAN_FRONTEND="noninteractive" apt-get install -y app-protect 
+
+# Forward request logs to Docker log collector:
+RUN ln -sf /dev/stdout /var/log/nginx/access.log \
+    && ln -sf /dev/stderr /var/log/nginx/error.log
+
+# Copy configuration files:
+COPY nginx.conf custom_log_format.json /etc/nginx/
+COPY entrypoint.sh /root/
+
+CMD ["sh", "/root/entrypoint.sh"]
+```
+
+### Alpine 3.16 / Alpine 3.17 Docker Deployment Example
+
+```dockerfile
+# syntax=docker/dockerfile:1
+# For Alpine 3.16 / 3.17 (replace the tag with alpine:3.17 for Alpine 3.17):
+FROM alpine:3.16
+
+# Download and add the NGINX signing keys:
+RUN wget -O /etc/apk/keys/nginx_signing.rsa.pub https://cs.nginx.com/static/keys/nginx_signing.rsa.pub \
+    && wget -O /etc/apk/keys/app-protect-security-updates.rsa.pub https://cs.nginx.com/static/keys/app-protect-security-updates.rsa.pub
+
+# Add NGINX Plus repository:
+RUN printf "https://pkgs.nginx.com/plus/alpine/v`egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release`/main\n" | tee -a /etc/apk/repositories
+
+# Add NGINX App Protect repository:
+RUN printf "https://pkgs.nginx.com/app-protect/alpine/v`egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release`/main\n" | tee -a /etc/apk/repositories \
+    && printf "https://pkgs.nginx.com/app-protect-security-updates/alpine/v`egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release`/main\n" | tee -a /etc/apk/repositories
+
+# Update the repository and install the most recent version of the NGINX App Protect WAF package (which includes NGINX Plus):
+RUN --mount=type=secret,id=nginx-crt,dst=/etc/apk/cert.pem,mode=0644 \
+    --mount=type=secret,id=nginx-key,dst=/etc/apk/cert.key,mode=0644 \
+    apk update && apk add app-protect
+
+# Forward request logs to Docker log collector:
+RUN ln -sf /dev/stdout /var/log/nginx/access.log \
+    && ln -sf /dev/stderr /var/log/nginx/error.log
+
+# Copy configuration files:
+COPY nginx.conf custom_log_format.json /etc/nginx/
+COPY entrypoint.sh /root/
+
+CMD ["sh", "/root/entrypoint.sh"]
+```
+
+
+## Converter Tool Docker Image
+
+This section explains how to build a Docker image for the purpose of converting policy 
files from other F5 WAF products to NGINX App Protect WAF JSON declarative format. +For more details regarding this feature refer to [Converter Tools]({{< relref "/nap-waf/v4/configuration-guide/configuration.md#converter-tools" >}}). + +### Converter Docker Deployment Instructions +You need root permissions to execute the following steps. + +1. Create a Dockerfile (see examples below). + +2. Log in to the [Customer Portal](https://my.f5.com) and download the following two files: + + ```shell + nginx-repo.key + nginx-repo.crt + ``` + +3. Create a Docker image: + + - For CentOS/Oracle Linux/Debian/Ubuntu/Alpine/Amazon Linux: + + ```shell + DOCKER_BUILDKIT=1 docker build --no-cache --secret id=nginx-crt,src=nginx-repo.crt --secret id=nginx-key,src=nginx-repo.key -t app-protect-converter . + ``` + + The `DOCKER_BUILDKIT=1` enables `docker build` to recognize the `--secret` flag which allows the user to pass secret information to be used in the Dockerfile for building docker images in a safe way that will not end up stored in the final image. This is a recommended practice for the handling of the certificate and private key for NGINX repository access (`nginx-repo.crt` and `nginx-repo.key` files). More information [here](https://docs.docker.com/engine/reference/commandline/buildx_build/#secret). + + - For RHEL: + + ```shell + podman build --no-cache --secret id=nginx-crt,src=nginx-repo.crt --secret id=nginx-key,src=nginx-repo.key -t app-protect-converter . + ``` + + **Notes:** + - The `--no-cache` option tells Docker/Podman to build the image from scratch and ensures the installation of the latest version of NGINX Plus and NGINX App Protect WAF 4.x. If the Dockerfile was previously used to build an image without the `--no-cache` option, the new image uses versions from the previously built image from the cache. + - For RHEL:
      + The subscription-manager is disabled when running inside containers based on Red Hat Universal Base images. You will need a registered and subscribed RHEL system. + +4. Create a temporary folder and copy your XML policy file(s) to it: + + ```shell + mkdir /tmp/convert + cp policy.xml /tmp/convert/ + ``` + +5. Run the docker image with the temporary folder as a mounted volume on the container, and run the policy converter script: + + ```shell + docker run -v /tmp/convert:/tmp/convert app-protect-converter /opt/app_protect/bin/convert-policy -i /tmp/convert/policy.xml -o /tmp/convert/policy.json | jq + ``` + + Output: + + ```json + { + "completed_successfully": true, + "file_size": 20604, + "warnings": [ + "Default header '*-bin' cannot be deleted.", + "Traffic Learning, Policy Building, and staging are unsupported", + "/general/enableEventCorrelation must be '0' (was '1').", + "Element '/websocket-urls' is unsupported.", + "/signature-sets/learn value 1 is unsupported", + "Element '/redirection-protection' is unsupported.", + "/protocolIndependent must be '1' (was '0').", + "Element '/gwt-profiles' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_ASM_COOKIE_HIJACKING' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_BLOCKING_CONDITION' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_BRUTE_FORCE' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_CONVICTION' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_CROSS_ORIGIN_REQUEST' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_CSRF' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_CSRF_EXPIRED' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_DYNAMIC_SESSION' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_FLOW' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_FLOW_DISALLOWED_INPUT' is unsupported.", + 
"/blocking-settings/violations/name value 'VIOL_FLOW_ENTRY_POINT' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_FLOW_MANDATORY_PARAMS' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_GEOLOCATION' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_GWT_FORMAT' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_GWT_MALFORMED' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_HOSTNAME_MISMATCH' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_LOGIN_URL_BYPASSED' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_LOGIN_URL_EXPIRED' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_MALICIOUS_DEVICE' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_MALICIOUS_IP' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_PARAMETER_DYNAMIC_VALUE' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_PLAINTEXT_FORMAT' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_REDIRECT' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_SESSION_AWARENESS' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_VIRUS' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_WEBSOCKET_BAD_REQUEST' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_WEBSOCKET_BINARY_MESSAGE_LENGTH' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_WEBSOCKET_BINARY_MESSAGE_NOT_ALLOWED' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_WEBSOCKET_EXTENSION' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_WEBSOCKET_FRAMES_PER_MESSAGE_COUNT' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_WEBSOCKET_FRAME_LENGTH' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_WEBSOCKET_FRAME_MASKING' is unsupported.", + "/blocking-settings/violations/name value 
'VIOL_WEBSOCKET_FRAMING_PROTOCOL' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_WEBSOCKET_TEXT_MESSAGE_NOT_ALLOWED' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_WEBSOCKET_TEXT_NULL_VALUE' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_XML_SCHEMA' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_XML_SOAP_ATTACHMENT' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_XML_SOAP_METHOD' is unsupported.", + "/blocking-settings/violations/name value 'VIOL_XML_WEB_SERVICES_SECURITY' is unsupported.", + "/blocking-settings/http-protocols/description value 'Unparsable request content' is unsupported.", + "Element '/plain-text-profiles' is unsupported." + ], + "filename": "/tmp/convert/policy-ubuntu.json" + } + ``` + +6. Once completed, the newly exported JSON policy file should reside in the same folder as the source XML policy file: + + ```shell + ls -l /tmp/convert/ + total 848 + -rw-r--r-- 1 root root 20604 Dec 20 12:33 policy.json # Exported JSON policy file + -rw-r--r-- 1 root root 841818 Dec 20 11:10 policy.xml # Original XML policy file + ``` + + +### CentOS 7 Converter Docker Deployment Example + +```dockerfile +# syntax=docker/dockerfile:1 +# For CentOS 7: +FROM centos:7 + +# Install prerequisite packages: +RUN yum -y install wget ca-certificates epel-release + +# Add NGINX Plus repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-7.repo + +# Update the repository and install the most recent version of the NGINX App Protect WAF Compiler package: +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + yum -y install app-protect-compiler \ + && yum clean all \ + && rm -rf /var/cache/yum +``` + +### RHEL UBI7 Converter Docker Deployment Example + +```dockerfile +# syntax=docker/dockerfile:1 +# For RHEL ubi7: +FROM 
registry.access.redhat.com/ubi7/ubi + +# Install prerequisite packages: +RUN yum -y install wget ca-certificates + +# Add NGINX App-protect & dependencies repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-7.repo +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo \ + # You can use either of the dependencies or epel repo + # && rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm \ + && yum clean all + +# Install NGINX App Protect WAF: +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + yum install --enablerepo=rhel-7-server-extras-rpms --enablerepo=rhel-7-server-optional-rpms --enablerepo=rhel-7-server-rpms -y app-protect-compiler \ + && yum clean all \ + && rm -rf /var/cache/yum +``` + +### RHEL UBI8 Converter Docker Deployment Example + +```dockerfile +# syntax=docker/dockerfile:1 +# For RHEL ubi8: +FROM registry.access.redhat.com/ubi8/ubi + +# Install prerequisite packages: +RUN dnf -y install wget ca-certificates + +# Add NGINX App-protect & dependencies repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-8.repo +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo \ + # You can use either of the dependencies or epel repo + # && rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm \ + && dnf clean all + +# Install NGINX App Protect WAF: +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + dnf install --enablerepo=codeready-builder-for-rhel-8-x86_64-rpms -y app-protect-compiler \ + && dnf clean all \ + && rm -rf /var/cache/dnf +``` + +### RHEL UBI9 Converter Docker Deployment Example + +```dockerfile +# syntax=docker/dockerfile:1 +# For RHEL ubi9: +FROM 
registry.access.redhat.com/ubi9/ubi + +# Install prerequisite packages: +RUN dnf -y install wget ca-certificates + +# Add NGINX App-protect & dependencies repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-9.repo +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo \ + # You can use either of the dependencies or epel repo + # && rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm \ + && dnf clean all + +# Install NGINX App Protect WAF: +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + dnf install --enablerepo=codeready-builder-for-rhel-9-x86_64-rpms -y app-protect-compiler \ + && dnf clean all \ + && rm -rf /var/cache/dnf +``` + +### Oracle Linux 8 Converter Docker Deployment Example + +```dockerfile +# syntax=docker/dockerfile:1 +# For Oracle Linux 8: +FROM oraclelinux:8 + +# Install prerequisite packages: +RUN dnf -y install wget ca-certificates yum-utils + +# Add NGINX App-protect repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-8.repo + +# Enable Yum repositories to pull App Protect dependencies: +RUN dnf config-manager --set-enabled ol8_codeready_builder \ + && wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo \ + # You can use either of the dependencies or epel repo + # && rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm \ + && dnf clean all + +# Install NGINX App Protect WAF: +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + dnf install -y app-protect-compiler \ + && dnf clean all \ + && rm -rf /var/cache/dnf +``` + +### Amazon Linux 2 Converter Docker Deployment Example + +```dockerfile +# syntax=docker/dockerfile:1 +# For Amazon Linux 2: 
+FROM amazonlinux:2 + +# Install prerequisite packages: +RUN amazon-linux-extras enable epel +RUN yum clean metadata +RUN yum -y install wget ca-certificates epel-release shadow-utils + +# Add NGINX App-protect repo to Yum: +RUN wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-7.repo + +# Install NGINX App Protect WAF: +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + yum -y install app-protect-compiler \ + && yum clean all \ + && rm -rf /var/cache/yum +``` + +### Debian 10 Converter Docker Deployment Example + +```dockerfile +# syntax=docker/dockerfile:1 +# For Debian 10: +FROM debian:buster + +# Install prerequisite packages: +RUN apt-get update && apt-get install -y apt-transport-https lsb-release ca-certificates wget gnupg2 + +# Download and add the NGINX signing keys: +RUN wget https://cs.nginx.com/static/keys/nginx_signing.key && apt-key add nginx_signing.key \ + && wget https://cs.nginx.com/static/keys/app-protect-security-updates.key && apt-key add app-protect-security-updates.key + +# Add NGINX App Protect WAF repositories: +RUN printf "deb https://pkgs.nginx.com/app-protect/debian `lsb_release -cs` nginx-plus\n" | tee /etc/apt/sources.list.d/nginx-app-protect.list \ + && printf "deb https://pkgs.nginx.com/app-protect-security-updates/debian `lsb_release -cs` nginx-plus\n" | tee /etc/apt/sources.list.d/app-protect-security-updates.list + +# Download the apt configuration to `/etc/apt/apt.conf.d`: +RUN wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90nginx + +# Update the repository and install the most recent version of the NGINX App Protect WAF Compiler package: +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + apt-get update && apt-get install -y app-protect-compiler + +CMD ["sh"] +``` + 
+### Debian 11 / Debian 12 Converter Docker Deployment Example + +```dockerfile +# syntax=docker/dockerfile:1 +# For Debian 11: +FROM debian:bullseye/bookworm + +# Install prerequisite packages: +RUN apt-get update && apt-get install -y apt-transport-https lsb-release ca-certificates wget gnupg2 + +# Download and add the NGINX signing keys: +RUN wget -qO - https://cs.nginx.com/static/keys/nginx_signing.key | \ + gpg --dearmor | tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null +RUN wget -qO - https://cs.nginx.com/static/keys/app-protect-security-updates.key | \ + gpg --dearmor | tee /usr/share/keyrings/app-protect-security-updates.gpg >/dev/null + +# Add NGINX App Protect WAF repositories: +RUN printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ + https://pkgs.nginx.com/app-protect/debian `lsb_release -cs` nginx-plus\n" | \ + tee /etc/apt/sources.list.d/nginx-app-protect.list +RUN printf "deb [signed-by=/usr/share/keyrings/app-protect-security-updates.gpg] \ + https://pkgs.nginx.com/app-protect-security-updates/debian `lsb_release -cs` nginx-plus\n" | \ + tee /etc/apt/sources.list.d/app-protect-security-updates.list + +# Download the apt configuration to `/etc/apt/apt.conf.d`: +RUN wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx + +# Update the repository and install the most recent version of the NGINX App Protect WAF Compiler package: +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + apt-get update && DEBIAN_FRONTEND="noninteractive" apt-get install -y app-protect-compiler +``` + +### Ubuntu 18.04 / Ubuntu 20.04 / Ubuntu 22.04 / Ubuntu 24.04 Converter Docker Deployment Example + +```dockerfile +ARG OS_CODENAME +# Where OS_CODENAME can be: bionic/focal/jammy/noble +# syntax=docker/dockerfile:1 +# For Ubuntu 18.04 / 20.04 /22.04 / 24.04: +FROM ubuntu:${OS_CODENAME} + +# Install prerequisite 
packages: +RUN apt-get update && apt-get install -y apt-transport-https lsb-release ca-certificates wget gnupg2 + +# Download and add the NGINX signing keys: +RUN wget -qO - https://cs.nginx.com/static/keys/nginx_signing.key | \ + gpg --dearmor | tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null +RUN wget -qO - https://cs.nginx.com/static/keys/app-protect-security-updates.key | \ + gpg --dearmor | tee /usr/share/keyrings/app-protect-security-updates.gpg >/dev/null + +# Add NGINX App Protect WAF repositories: +RUN printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ + https://pkgs.nginx.com/app-protect/ubuntu `lsb_release -cs` nginx-plus\n" | \ + tee /etc/apt/sources.list.d/nginx-app-protect.list +RUN printf "deb [signed-by=/usr/share/keyrings/app-protect-security-updates.gpg] \ + https://pkgs.nginx.com/app-protect-security-updates/ubuntu `lsb_release -cs` nginx-plus\n" | \ + tee /etc/apt/sources.list.d/app-protect-security-updates.list + +# Download the apt configuration to `/etc/apt/apt.conf.d`: +RUN wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx + +# Update the repository and install the most recent version of the NGINX App Protect WAF Compiler package: +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + apt-get update && DEBIAN_FRONTEND="noninteractive" apt-get install -y app-protect-compiler +``` + +### Alpine 3.16 / Alpine 3.17 Converter Docker Deployment Example + +```dockerfile +# syntax=docker/dockerfile:1 +# For Alpine 3.16/3.17: +FROM alpine:3.16/3.17 + +# Download and add the NGINX signing keys: +RUN wget -O /etc/apk/keys/nginx_signing.rsa.pub https://cs.nginx.com/static/keys/nginx_signing.rsa.pub \ + && wget -O /etc/apk/keys/app-protect-security-updates.rsa.pub https://cs.nginx.com/static/keys/app-protect-security-updates.rsa.pub + +# Add NGINX Plus repository: +RUN printf 
"https://pkgs.nginx.com/app-protect/alpine/v`egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release`/main\n" | tee -a /etc/apk/repositories \ + && printf "https://pkgs.nginx.com/app-protect-security-updates/alpine/v`egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release`/main\n" | tee -a /etc/apk/repositories + +# Update the repository and install the most recent version of the NGINX App Protect WAF Compiler package: +RUN --mount=type=secret,id=nginx-crt,dst=/etc/apk/cert.pem,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/apk/cert.key,mode=0644 \ + apk update && apk add app-protect-compiler +``` + +## Offline Installation + +To perform an offline installation of NGINX App Protect WAF you can use a host with access to the NGINX repository to download all the packages (including dependencies) to your local repository. + +### Example Deployment for CentOS/RHEL + +#### Add the NGINX App Protect WAF Packages to an Internal Repository + +On a host with access to the NGINX App Protect WAF repository: + +1. Install the `downloadonly` plugin for Yum: + + ```shell + yum -y install yum-plugin-downloadonly + ``` + +2. Download all NGINX App Protect WAF packages, including all dependencies: + + ```shell + mkdir -p /etc/packages/ + yum install --downloadonly --downloaddir=/etc/packages/ app-protect + ``` + +3. Download the `epel-release` dependency package: + + For CentOS: + + ```shell + yum install --downloadonly --downloaddir=/etc/packages/ epel-release + ``` + + For RHEL 7: + + ```shell + wget -P /etc/packages https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + ``` + + For RHEL 8.1+ / Oracle Linux 8.1+: + + ```shell + wget -P /etc/packages https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + ``` + + For RHEL 9+: + + ```shell + wget -P /etc/packages https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm + ``` + +4. Add the packages in `/etc/packages` to your local repository. 
+ + +#### Install NGINX App Protect WAF from an Internal Repository + +On an offline host: + +1. Add your internal repository configuration. +2. Install NGINX App Protect WAF: + + ```shell + yum -y install app-protect + ``` + +### Example Deployment for Debian/Ubuntu + +#### Add the NGINX App Protect WAF Packages to an Internal Repository + +On a host with access to the NGINX App Protect WAF repository: + +1. Download all NGINX App Protect WAF packages, including all dependencies: + + ```shell + mkdir -p /etc/packages/ + cd /etc/packages/ + apt-get update + for i in $(apt-cache depends --recurse --no-recommends --no-suggests --no-conflicts --no-breaks --no-replaces --no-enhances app-protect | grep "^\w" | sort -u); do apt-get download $i 2>>errors.txt; done + ``` + +2. Add the packages in `/etc/packages` to your local repository. + + +#### Install NGINX App Protect WAF from an Internal Repository + +On an offline host: + +1. Add your internal repository configuration. +2. Install NGINX App Protect WAF: + + ```shell + apt-get update + apt-get install -y app-protect + ``` + +## Post-Installation Checks + +You can run the following commands to ensure that NGINX App Protect WAF enforcement is operational. + +1. Check that the three processes needed for NGINX App Protect WAF are running using `ps aux`: + - bd-socket-plugin + - nginx: master process + - nginx: worker process + + ```none + USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND + root 8 1.3 2.4 3486948 399092 ? Sl 09:11 0:02 /usr/share/ts/bin/bd-socket-plugin tmm_count 4 proc_cpuinfo_cpu_mhz 2000000 total_xml_memory 307200000 total_umu_max_size 3129344 sys_max_account_id 1024 no_static_config + root 14 0.0 0.1 71060 26680 ? S 09:11 0:00 nginx: master process /usr/sbin/nginx -c /tmp/policy/test_nginx.conf -g daemon off; + root 26 0.0 0.3 99236 52092 ? S 09:12 0:00 nginx: worker process + root 28 0.0 0.0 11788 2920 pts/0 Ss 09:12 0:00 bash + root 43 0.0 0.0 47460 3412 pts/0 R+ 09:14 0:00 ps aux + ``` + +2. 
Verify that there are no NGINX errors in the `/var/log/nginx/error.log` and that the policy compiled successfully: + + ```none + 2020/05/10 13:21:04 [notice] 402#402: APP_PROTECT { "event": "configuration_load_start", "configSetFile": "/opt/f5waf/config/config_set.json" } + 2020/05/10 13:21:04 [notice] 402#402: APP_PROTECT policy 'app_protect_default_policy' from: /etc/app_protect/conf/NginxDefaultPolicy.json compiled successfully + 2020/05/10 13:21:04 [notice] 402#402: APP_PROTECT { "event": "configuration_load_success", "software_version": "1.1.1", "attack_signatures_package":{"revision_datetime":"2019-07-16T12:21:31Z"},"completed_successfully":true} + 2020/05/10 13:21:04 [notice] 402#402: using the "epoll" event method + 2020/05/10 13:21:04 [notice] 402#402: nginx/1.17.6 (nginx-plus-r20) + 2020/05/10 13:21:04 [notice] 402#402: built by gcc 4.8.5 20150623 (Red Hat 4.8.5-36) (GCC) + 2020/05/10 13:21:04 [notice] 402#402: OS: Linux 3.10.0-957.27.2.el7.x86_64 + 2020/05/10 13:21:04 [notice] 402#402: getrlimit(RLIMIT_NOFILE): 1048576:1048576 + 2020/05/10 13:21:04 [notice] 406#406: start worker processes + 2020/05/10 13:21:04 [notice] 406#406: start worker process 407 + ``` + +3. Check that sending an attack signature in a request returns a response block page containing a support ID: + + ```none + Request: + http://10.240.185.211/?a= + +2. As with NGINX Open Source, default NGINX Plus image has the same default settings: + + - access and error logs are linked to the Docker log collector + - no volumes are specified: a Dockerfile can be used to create base images from which you can create new images with volumes specified, or volumes can be specified manually: + + ```dockerfile + VOLUME /usr/share/nginx/html + VOLUME /etc/nginx + ``` + + - no files are copied from the Docker host as a container is created: you can add `COPY` definitions to each Dockerfile, or the image you create can be used as the basis for another image + +3. 
Log in to [MyF5 Customer Portal](https://account.f5.com/myf5) and download your *nginx-repo.crt* and *nginx-repo.key* files. For a trial of NGINX Plus, the files are provided with your trial package. + +4. Copy the files to the directory where the Dockerfile is located. + +5. Create a Docker image, for example, `nginxplus` (note the final period in the command). + + ```shell + docker build --no-cache --secret id=nginx-key,src=nginx-repo.key --secret id=nginx-crt,src=nginx-repo.crt -t nginxplus . + ``` + + The `--no-cache` option tells Docker to build the image from scratch and ensures the installation of the latest version of NGINX Plus. If the Dockerfile was previously used to build an image without the `--no-cache` option, the new image uses the version of NGINX Plus from the previously built image from the Docker cache. + +6. Verify that the `nginxplus` image was created successfully with the `docker images` command: + + ```shell + $ docker images nginxplus + REPOSITORY TAG IMAGE ID CREATED SIZE + nginxplus latest ef2bf65931cf 6 seconds ago 91.2 MB + ``` + +7. Create a container based on this image, for example, `mynginxplus` container: + + ```shell + docker run --name mynginxplus -p 80:80 -d nginxplus + ``` + +8. Verify that the `mynginxplus` container is up and running with the `docker ps` command: + + ```shell + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS ... + eb7be9f439db nginxplus:latest "nginx -g 'daemon of 1 minute ago Up 15 seconds ... + + ... PORTS NAMES + ... 0.0.0.0:80->80/tcp mynginxplus + ``` + +NGINX Plus containers are controlled and managed in the same way as NGINX Open Source containers. 
+
+
+
+## Managing Content and Configuration Files
+
+Content served by NGINX and NGINX configuration files can be managed in several ways:
+
+- files are maintained on the Docker host
+- files are copied from the Docker host to a container
+- files are maintained in the container
+
+
+
+### Maintaining Content and Configuration Files on the Docker Host
+
+When the container is created, you can mount a local directory on the Docker host to a directory in the container. The NGINX image uses the default NGINX configuration, which uses `/usr/share/nginx/html` as the container’s root directory and puts configuration files in `/etc/nginx`. For a Docker host with content in the local directory `/var/www` and configuration files in `/var/nginx/conf`, run the command:
+
+```shell
+docker run --name mynginx2 \
+    --mount type=bind,source=/var/www,target=/usr/share/nginx/html,readonly \
+    --mount type=bind,source=/var/nginx/conf,target=/etc/nginx/conf,readonly \
+    -p 80:80 \
+    -d nginxplus
+```
+
+Any changes made to the files in the local directories `/var/www` and `/var/nginx/conf` on the Docker host are reflected in the directories `/usr/share/nginx/html` and `/etc/nginx` in the container. The `readonly` option means these directories can be changed only on the Docker host, not from within the container.
+
+
+
+### Copying Content and Configuration Files from the Docker Host
+
+Docker can copy the content and configuration files from a local directory on the Docker host during container creation. Once a container is created, the files are maintained by creating a new container when files change or by modifying the files in the container.
+
+A simple way to copy the files is to create a Dockerfile with commands that are run during generation of a new Docker image based on the NGINX image. For the file‑copy (COPY) commands in the Dockerfile, the local directory path is relative to the build context where the Dockerfile is located.
+ +Let's assume that the content directory is `content` and the directory for configuration files is `conf`, both subdirectories of the directory where the Dockerfile is located. The NGINX image has the default NGINX configuration files, including `default.conf`, in the `/etc/nginx/conf.d` directory. To use the configuration files from the Docker host only, delete the default files with the `RUN` command: + +```dockerfile +FROM nginx +RUN rm /etc/nginx/conf.d/default.conf +COPY content /usr/share/nginx/html +COPY conf /etc/nginx +``` + +Create NGINX image by running the command from the directory where the Dockerfile is located. The period (“.”) at the end of the command defines the current directory as the build context, which contains the Dockerfile and the directories to be copied: + +```shell +docker build -t mynginx_image1 . +``` + +Create a container `mynginx3` based on the `mynginx_image1` image: + +```shell +docker run --name mynginx3 -p 80:80 -d mynginx_image1 +``` + +To make changes to the files in the container, use a helper container as described in the next section. + + + +### Maintaining Content and Configuration Files in the Container + +As SSH cannot be used to access the NGINX container, to edit the content or configuration files directly you need to create a helper container that has shell access. For the helper container to have access to the files, create a new image that has the proper Docker data volumes defined for the image: + +1. Copy nginx content and configuration files and define the volume for the image with the Dockerfile: + + ```dockerfile + FROM nginx + COPY content /usr/share/nginx/html + COPY conf /etc/nginx + VOLUME /usr/share/nginx/html + VOLUME /etc/nginx + ``` + +2. Create the new NGINX image by running the following command: + + ```shell + docker build -t mynginx_image2 . + ``` + +3. 
Create an NGINX container `mynginx4` based on the `mynginx_image2` image: + + ```shell + docker run --name mynginx4 -p 80:80 -d mynginx_image2 + ``` + +4. Start a helper container `mynginx4_files` that has a shell, providing access the content and configuration directories of the `mynginx4` container we just created: + + ```shell + $ docker run -i -t --volumes-from mynginx4 --name mynginx4_files debian /bin/bash + root@b1cbbad63dd1:/# + ``` + + where: + - the new `mynginx4_files` helper container runs in the foreground with a persistent standard input (the `-i` option) and a tty (the `-t` option). All volumes defined in `mynginx4` are mounted as local directories in the helper container. + - the `debian` argument means that the helper container uses the Debian image from Docker Hub. Because the NGINX image also uses Debian, it is most efficient to use Debian for the helper container, rather than having Docker load another operating system + - the `/bin/bash` argument means that the bash shell runs in the helper container, presenting a shell prompt that you can use to modify files as needed + +To start and stop the container, run the commands: + +```shell +docker start mynginx4_files +docker stop mynginx4_files +``` + +To exit the shell but leave the container running, press `Ctrl+p` followed by `Ctrl+q`. To regain shell access to a running container, run this command: + +```shell +docker attach mynginx4_files +``` + +To exit the shell and terminate the container, run the `exit` command. + + + +## Managing Logging + +You can use default logging or customize logging. + + +### Using Default Logging + +By default, the NGINX image is configured to send NGINX [access log](https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log) and [error log](https://nginx.org/en/docs/ngx_core_module.html#error_log) to the Docker log collector. 
This is done by linking them to `stdout` and `stderr`: all messages from both logs are then written to the file `/var/lib/docker/containers/container-ID/container-ID-json.log` on the Docker host. The container‑ID is the long‑form ID returned when you [create a container](#docker_oss_image). To display the long form ID, run the command:
+
+```shell
+docker inspect --format '{{ .Id }}' container-name
+```
+
+You can use both the Docker command line and the Docker Engine API to extract the log messages.
+
+To extract log messages from the command line, run the command:
+
+```shell
+docker logs container-name
+```
+
+To extract log messages using the Docker Engine API, send a `GET` request over the Docker Unix socket:
+
+```shell
+curl --unix-socket /var/run/docker.sock "http://localhost/containers/container-name/logs?stdout=1&stderr=1"
+```
+
+To include only access log messages in the output, include only `stdout=1`. To limit the output to error log messages, include only `stderr=1`. For other available options, see the [Get container logs](https://docs.docker.com/engine/api/v1.39/#operation/ContainerLogs) section of the [Docker Engine API](https://docs.docker.com/engine/api/v1.39/) documentation.
+
+
+
+### Using Customized Logging
+
+If you want to configure logging differently for certain configuration blocks (such as `server {}` and `location {}`), define a Docker volume for the directory in which to store the log files in the container, create a helper container to access the log files, and use any logging tools. To implement this, create a new image that contains the volume or volumes for the logging files.
+ +For example, to configure NGINX to store log files in `/var/log/nginx/log`, add a `VOLUME` definition for this directory to the Dockerfile (provided that content and configuration Files are [managed in the container](#manage_container)): + +```dockerfile +FROM nginx +COPY content /usr/share/nginx/html +COPY conf /etc/nginx +VOLUME /var/log/nginx/log +``` + +Then you can [create an image](#docker_plus_image) and use it to create an NGINX container and a helper container that have access to the logging directory. The helper container can have any desired logging tools installed. + + + +## Controlling NGINX + +Since there is no direct access to the command line of the NGINX container, NGINX commands cannot be sent to a container directly. Instead, [signals](https://nginx.org/en/docs/control.html) can be sent to a container via Docker `kill` command. + +To reload the NGINX configuration, send the `HUP` signal to Docker: + +```shell +docker kill -s HUP container-name +``` + +To restart NGINX, run this command to restart the container: + +```shell +docker restart container-name +``` diff --git a/content/nginx/admin-guide/installing-nginx/installing-nginx-open-source.md b/content/nginx/admin-guide/installing-nginx/installing-nginx-open-source.md new file mode 100644 index 000000000..1ab54b023 --- /dev/null +++ b/content/nginx/admin-guide/installing-nginx/installing-nginx-open-source.md @@ -0,0 +1,1049 @@ +--- +description: Install NGINX Open Source either as a prebuilt package or from source, + following step-by-step instructions for all supported Linux distributions. +docs: DOCS-410 +doctypes: +- task +title: Installing NGINX Open Source +toc: true +weight: 200 +--- + +This article explains how to install NGINX Open Source. + + +## Choosing Between a Stable or a Mainline Version + +NGINX Open Source is available in two versions: + +- **Mainline** – Includes the latest features and bug fixes and is always up to date. 
It is reliable, but it may include some experimental modules, and it may also have some number of new bugs. +- **Stable** – Doesn’t include all of the latest features, but has critical bug fixes that are always backported to the mainline version. We recommend the stable version for production servers. + + +## Choosing Between a Prebuilt Package and Compiling from Source + +Both the NGINX Open Source mainline and stable versions can be installed in two ways: + +- As a prebuilt binary package. This is a quick and easy way to install NGINX Open Source. The package includes almost all official NGINX modules and is available for most popular operating systems. See [Installing a Prebuilt Package](#prebuilt). +- As binaries you compile from source. This way is more flexible: you can add particular modules, including third‑party modules, or apply the latest security patches. See [Compiling and Installing from Source](#sources) for details. + + +## Installing a Prebuilt Package + +Installing NGINX Open Source from a package is much easier and faster than building from source, but building from source enables you to compile in non-standard modules. Prebuilt packages are available for most popular Linux distributions, including CentOS, Debian, Red Hat Enterprise Linux (RHEL), SUSE Linux Enterprise Server (SLES), and Ubuntu. See [Linux packages](https://nginx.org/en/linux_packages.html) at **nginx.org** for the list of currently supported operating systems. + + +### Modules Included in a Prebuilt Package + +See [Source packages](https://nginx.org/en/linux_packages.html#sourcepackages) at **nginx.org** for the list of modules included in each prebuilt package. + + +### Installing Prebuilt RHEL, CentOS, Oracle Linux, AlmaLinux, Rocky Linux Packages + +NGINX, Inc. 
provides packages for the following CentOS, Oracle Linux, RHEL, AlmaLinux and Rocky Linux versions: + + +{{}} + +|Version | Supported Platforms | +| ---| --- | +|7.4+ | x86_64, aarch64/arm64 | +|8x | x86_64, aarch64/arm64, s390x | +|9x | x86_64, aarch64/arm64, s390x | + +{{}} + +The package can be installed from: + +- A default RHEL / CentOS / Oracle Linux / AlmaLinux / Rocky Linux repository. This is the quickest way, but generally the provided package is outdated. +- The official repo at **nginx.org**. You have to set up the `yum` repository the first time, but after that the provided package is always up to date. + +#### Installing a Prebuilt CentOS/RHEL/ Oracle Linux/AlmaLinux/Rocky Linux Package from an OS Repository + +1. Install the EPEL repository: + + ```shell + sudo yum install epel-release + ``` + +2. Update the repository: + + ```shell + sudo yum update + ``` + +3. Install NGINX Open Source: + + ```shell + sudo yum install nginx + ``` + +4. Verify the installation: + + ```shell + sudo nginx -v + ``` + +#### Installing a Prebuilt RHEL/CentOS/Oracle Linux/AlmaLinux/Rocky Linux Package from the Official NGINX Repository + +1. Install the prerequisites: + + ```shell + sudo yum install yum-utils + ``` + +2. Set up the `yum` repository for RHEL/CentOS/Oracle Linux/AlmaLinux/Rocky Linux by creating the file **nginx.repo** in **/etc/yum.repos.d**, for example using `vi`: + + ```shell + sudo vi /etc/yum.repos.d/nginx.repo + ``` + +3. 
Add the following lines to **nginx.repo**: + + ```none + [nginx-stable] + name=nginx stable repo + baseurl=http://nginx.org/packages/centos/$releasever/$basearch/ + gpgcheck=1 + enabled=1 + gpgkey=https://nginx.org/keys/nginx_signing.key + module_hotfixes=true + + [nginx-mainline] + name=nginx mainline repo + baseurl=http://nginx.org/packages/mainline/centos/$releasever/$basearch/ + gpgcheck=1 + enabled=0 + gpgkey=https://nginx.org/keys/nginx_signing.key + module_hotfixes=true + ``` + + where the `stable` or `mainline` element points to the latest stable or mainline version of NGINX Open Source. By default, the repository for stable nginx packages is used. If you would like to use mainline nginx packages, run the following command: + + ```shell + sudo yum-config-manager --enable nginx-mainline + ``` + +4. Save the changes and quit `vi` (press ESC and type `wq` at the `:` prompt). + +5. Update the repository: + + ```shell + sudo yum update + ``` + +6. Install the NGINX Open Source package: + + ```shell + sudo yum install nginx + ``` + + When prompted to accept the GPG key, verify that the fingerprint matches: + `8540 A6F1 8833 A80E 9C16 53A4 2FD2 1310 B49F 6B46`, + `573B FD6B 3D8F BC64 1079 A6AB ABF5 BD82 7BD9 BF62`, + `9E9B E90E ACBC DE69 FE9B 204C BCDC D8A3 8D88 A2B3`, + and if so, accept it. + +7. Start NGINX Open Source: + + ```shell + sudo nginx + ``` + +8. Verify that NGINX Open Source is up and running: + + ```shell + curl -I 127.0.0.1 + HTTP/1.1 200 OK + Server: nginx/1.27.0 + ``` + + +### Installing Prebuilt Debian Packages + +NGINX provides packages for the following Debian operating systems: + + +{{}} + +|Version | Codename | Supported Platforms | +| ---| ---| --- | +|11.x | bullseye | x86_64, aarch64/arm64 | +|12.x | bookworm | x86_64, aarch64/arm64 | + +{{}} + +The package can be installed from: + +- A default Debian repository. This is the quickest way, but generally the provided package is outdated. +- The official repo at **nginx.org**. 
You have to set up the `apt-get` repository the first time, but after that the provided package is always up to date. + + +#### Installing a Prebuilt Debian Package from an OS Repository + +1. Update the Debian repository information: + + ```shell + sudo apt-get update + ``` + +2. Install the NGINX Open Source package: + + ```shell + sudo apt-get install nginx + ``` + +3. Verify the installation: + + ```shell + sudo nginx -v + ``` + +#### Installing a Prebuilt Debian Package from the Official NGINX Repository + +1. Install the prerequisites: + + ```shell + sudo apt install curl gnupg2 ca-certificates lsb-release debian-archive-keyring + ``` + +2. Import an official nginx signing key so `apt` could verify the packages authenticity. Fetch the key: + + ```shell + curl https://nginx.org/keys/nginx_signing.key | gpg --dearmor \ + | sudo tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null + ``` + +3. Verify that the downloaded file contains the proper key: + + ```shell + gpg --dry-run --quiet --no-keyring --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg + ``` + + The output should contain the full fingerprints: + `8540 A6F1 8833 A80E 9C16 53A4 2FD2 1310 B49F 6B46`, + `573B FD6B 3D8F BC64 1079 A6AB ABF5 BD82 7BD9 BF62`, + `9E9B E90E ACBC DE69 FE9B 204C BCDC D8A3 8D88 A2B3` + as follows: + + ```none + pub rsa4096 2024-05-29 [SC] + 8540A6F18833A80E9C1653A42FD21310B49F6B46 + uid nginx signing key + + pub rsa2048 2011-08-19 [SC] [expires: 2027-05-24] + 573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 + uid nginx signing key + + pub rsa4096 2024-05-29 [SC] + 9E9BE90EACBCDE69FE9B204CBCDCD8A38D88A2B3 + uid nginx signing key + ``` + + If the fingerprint is different, remove the file. + + +4. 
To set up the `apt` repository for stable nginx packages, run the following command: + + ```shell + echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ + http://nginx.org/packages/debian `lsb_release -cs` nginx" \ + | sudo tee /etc/apt/sources.list.d/nginx.list + ``` + + If you would like to use `mainline` nginx packages, run the following command instead: + + ```shell + echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ + http://nginx.org/packages/mainline/debian `lsb_release -cs` nginx" \ + | sudo tee /etc/apt/sources.list.d/nginx.list + ``` + +5. Set up repository pinning to prefer our packages over distribution-provided ones: + + ```shell + echo -e "Package: *\nPin: origin nginx.org\nPin: release o=nginx\nPin-Priority: 900\n" \ + | sudo tee /etc/apt/preferences.d/99nginx + ``` + +6. Install the NGINX package: + + ```shell + sudo apt update + sudo apt install nginx + ``` + +7. Start NGINX Open Source: + + ```shell + sudo nginx + ``` + +8. Verify that NGINX Open Source is up and running: + + ```shell + curl -I 127.0.0.1 + HTTP/1.1 200 OK + Server: nginx/1.27.0 + ``` + + +### Installing Prebuilt Ubuntu Packages + +NGINX provides packages for the following Ubuntu operating systems: + + +{{}} + +|Version | Codename | Supported Platforms | +| ---| ---| --- | +|20.04 | focal | x86_64, aarch64/arm64, s390x | +|22.04 | jammy | x86_64, aarch64/arm64, s390x | +|22.10 | kinetic | x86_64, aarch64/arm64 | +|23.04 | lunar | x86_64, aarch64/arm64 | + +{{}} + +The package can be installed from: + +- A default Ubuntu repository. This is the quickest way, but generally the provided package is outdated. +- The official repo at **nginx.org**. You have to set up the `apt-get` repository the first time, but after that the provided package is always up to date. + +#### Installing a Prebuilt Ubuntu Package from an Ubuntu Repository + +1. Update the Ubuntu repository information: + + ```shell + sudo apt-get update + ``` + +2. 
Install the package: + + ```shell + sudo apt-get install nginx + ``` + +3. Verify the installation: + + ```shell + sudo nginx -v + ``` + +#### Installing a Prebuilt Ubuntu Package from the Official NGINX Repository + +1. Install the prerequisites: + + ```shell + sudo apt install curl gnupg2 ca-certificates lsb-release ubuntu-keyring + ``` + +2. Import an official nginx signing key so apt could verify the packages authenticity. Fetch the key: + + ```shell + curl https://nginx.org/keys/nginx_signing.key | gpg --dearmor \ + | sudo tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null + ``` + +3. Verify that the downloaded file contains the proper key: + + ```shell + gpg --dry-run --quiet --no-keyring --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg + ``` + + The output should contain the full fingerprints: + `8540 A6F1 8833 A80E 9C16 53A4 2FD2 1310 B49F 6B46`, + `573B FD6B 3D8F BC64 1079 A6AB ABF5 BD82 7BD9 BF62`, + `9E9B E90E ACBC DE69 FE9B 204C BCDC D8A3 8D88 A2B3` + as follows: + + ```none + pub rsa4096 2024-05-29 [SC] + 8540A6F18833A80E9C1653A42FD21310B49F6B46 + uid nginx signing key + + pub rsa2048 2011-08-19 [SC] [expires: 2027-05-24] + 573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 + uid nginx signing key + + pub rsa4096 2024-05-29 [SC] + 9E9BE90EACBCDE69FE9B204CBCDCD8A38D88A2B3 + uid nginx signing key + ``` + If the fingerprint is different, remove the file. + +4. 
To set up the `apt` repository for stable nginx packages, run the following command: + + ```shell + echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ + http://nginx.org/packages/ubuntu `lsb_release -cs` nginx" \ + | sudo tee /etc/apt/sources.list.d/nginx.list + ``` + + If you would like to use `mainline` nginx packages, run the following command instead: + + ```shell + echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ + http://nginx.org/packages/mainline/ubuntu `lsb_release -cs` nginx" \ + | sudo tee /etc/apt/sources.list.d/nginx.list + ``` + +5. Set up repository pinning to prefer our packages over distribution-provided ones: + + ```shell + echo -e "Package: *\nPin: origin nginx.org\nPin: release o=nginx\nPin-Priority: 900\n" \ + | sudo tee /etc/apt/preferences.d/99nginx + ``` + +6. Install NGINX Open Source: + + ```shell + sudo apt update + sudo apt install nginx + ``` + +7. Start NGINX Open Source: + + ```shell + sudo nginx + ``` + +8. Verify that NGINX Open Source is up and running: + + ```shell + curl -I 127.0.0.1 + HTTP/1.1 200 OK + Server: nginx/1.27.0 + ``` + + +### Installing SUSE Packages + +NGINX provides packages for SUSE Linux Enterprise Server: + + +{{}} + +|Version | Supported Platforms | +| ---| --- | +|SLES 12 SP5+ | x86_64 | +|SLES 15 SP2+ | x86_64 | + +{{}} + +#### Installing a Prebuilt SUSE Package from the Official NGINX Repository + +1. Install the prerequisites: + + ```shell + sudo zypper install curl ca-certificates gpg2 + ``` + +2. To set up the `zypper` repository for stable nginx packages, run the following command: + + ```shell + sudo zypper addrepo --gpgcheck --type yum --refresh --check \ + 'http://nginx.org/packages/sles/$releasever_major' nginx-stable + ``` + +3. 
If you would like to use mainline nginx packages, run the following command instead: + + ```shell + sudo zypper addrepo --gpgcheck --type yum --refresh --check \ + 'http://nginx.org/packages/mainline/sles/$releasever_major' nginx-mainline + ``` + +4. Import an official nginx signing key so `zypper/rpm` could verify the packages authenticity. Fetch the key: + + ```shell + curl -o /tmp/nginx_signing.key https://nginx.org/keys/nginx_signing.key + ``` + +5. Verify that the downloaded file contains the proper key: + + ```shell + gpg --with-fingerprint /tmp/nginx_signing.key + ``` + + The output should contain the full fingerprints: + `8540 A6F1 8833 A80E 9C16 53A4 2FD2 1310 B49F 6B46`, + `573B FD6B 3D8F BC64 1079 A6AB ABF5 BD82 7BD9 BF62`, + `9E9B E90E ACBC DE69 FE9B 204C BCDC D8A3 8D88 A2B3` + as follows: + + ```none + pub rsa4096 2024-05-29 [SC] + 8540A6F18833A80E9C1653A42FD21310B49F6B46 + uid nginx signing key + + pub rsa2048 2011-08-19 [SC] [expires: 2027-05-24] + 573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 + uid nginx signing key + + pub rsa4096 2024-05-29 [SC] + 9E9BE90EACBCDE69FE9B204CBCDCD8A38D88A2B3 + uid nginx signing key + ``` + If the fingerprint is different, remove the file. + +6. Import the key to the rpm database: + + ```shell + sudo rpmkeys --import /tmp/nginx_signing.key + ``` + +7. To install nginx, run the command: + + ```shell + sudo zypper install nginx + ``` + + + +### Installing Prebuilt Alpine Linux Packages + +NGINX provides packages for the following Alpine Linux operating systems: + + +{{}} + +|Version | Supported Platforms | +| ---| --- | +|3.15 | x86_64, aarch64/arm64 | +|3.16 | x86_64, aarch64/arm64 | +|3.17 | x86_64, aarch64/arm64 | +|3.18 | x86_64, aarch64/arm64 | + +{{}} + +The package can be installed from the official repo at **nginx.org**. You have to set up the `apk` repository the first time, but after that the provided package is always up to date.
+ + +#### Installing a Prebuilt Alpine Linux Package from the Official NGINX Repository + +1. Install the prerequisites: + + ```shell + sudo apk add openssl curl ca-certificates + ``` + +2. To set up the apk repository for stable nginx packages, run the command: + + ```shell + printf "%s%s%s\n" \ + "http://nginx.org/packages/alpine/v" \ + `egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release` \ + "/main" \ + | sudo tee -a /etc/apk/repositories + ``` + + For mainline nginx packages, run the following command instead: + + ```shell + printf "%s%s%s\n" \ + "http://nginx.org/packages/mainline/alpine/v" \ + `egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release` \ + "/main" \ + | sudo tee -a /etc/apk/repositories + ``` + +3. Import an official nginx signing key so apk could verify the packages authenticity. Fetch the key: + + ```shell + curl -o /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub + ``` + +4. Verify that the downloaded file contains the proper key: + + ```shell + openssl rsa -pubin -in /tmp/nginx_signing.rsa.pub -text -noout + ``` + + The output should contain the following modulus: + + ```none + Public-Key: (2048 bit) + Modulus: + 00:fe:14:f6:0a:1a:b8:86:19:fe:cd:ab:02:9f:58: + 2f:37:70:15:74:d6:06:9b:81:55:90:99:96:cc:70: + 5c:de:5b:e8:4c:b2:0c:47:5b:a8:a2:98:3d:11:b1: + f6:7d:a0:46:df:24:23:c6:d0:24:52:67:ba:69:ab: + 9a:4a:6a:66:2c:db:e1:09:f1:0d:b2:b0:e1:47:1f: + 0a:46:ac:0d:82:f3:3c:8d:02:ce:08:43:19:d9:64: + 86:c4:4e:07:12:c0:5b:43:ba:7d:17:8a:a3:f0:3d: + 98:32:b9:75:66:f4:f0:1b:2d:94:5b:7c:1c:e6:f3: + 04:7f:dd:25:b2:82:a6:41:04:b7:50:93:94:c4:7c: + 34:7e:12:7c:bf:33:54:55:47:8c:42:94:40:8e:34: + 5f:54:04:1d:9e:8c:57:48:d4:b0:f8:e4:03:db:3f: + 68:6c:37:fa:62:14:1c:94:d6:de:f2:2b:68:29:17: + 24:6d:f7:b5:b3:18:79:fd:31:5e:7f:4c:be:c0:99: + 13:cc:e2:97:2b:dc:96:9c:9a:d0:a7:c5:77:82:67: + c9:cb:a9:e7:68:4a:e1:c5:ba:1c:32:0e:79:40:6e: + ef:08:d7:a3:b9:5d:1a:df:ce:1a:c7:44:91:4c:d4: + 99:c8:88:69:b3:66:2e:b3:06:f1:f4:22:d7:f2:5f: + ab:6d + Exponent: 
65537 (0x10001) + ``` + +5. Move the key to `apk` trusted keys storage: + + ```shell + sudo mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/ + ``` + +6. To install nginx, run the command: + + ```shell + sudo apk add nginx + ``` + + The `@nginx` tag should also be specified when installing packages with [dynamic modules]({{< relref "../dynamic-modules/dynamic-modules.md" >}}): + + ```shell + sudo apk add nginx-module-image-filter@nginx nginx-module-njs@nginx + ``` + + +### Installing Prebuilt Amazon Linux Packages + +NGINX provides packages for + +- Amazon Linux 2 (LTS) x86_64, aarch64/arm64 +- Amazon Linux 2023 x86_64, aarch64/arm64 + +#### Installing a Prebuilt Amazon Linux Package from the Official NGINX Repository + +1. Install the prerequisites: + + ```shell + sudo yum install yum-utils + ``` + +2. To set up the `yum` repository for Amazon Linux 2, create the file named `/etc/yum.repos.d/nginx.repo` with the following contents: + + ```none + [nginx-stable] + name=nginx stable repo + baseurl=http://nginx.org/packages/amzn2/$releasever/$basearch/ + gpgcheck=1 + enabled=1 + gpgkey=https://nginx.org/keys/nginx_signing.key + module_hotfixes=true + + [nginx-mainline] + name=nginx mainline repo + baseurl=http://nginx.org/packages/mainline/amzn2/$releasever/$basearch/ + gpgcheck=1 + enabled=0 + gpgkey=https://nginx.org/keys/nginx_signing.key + module_hotfixes=true + ``` + + To set up the `yum` repository for Amazon Linux 2023, create the file named `/etc/yum.repos.d/nginx.repo` with the following contents: + + ```none + [nginx-stable] + name=nginx stable repo + baseurl=http://nginx.org/packages/amzn/2023/$basearch/ + gpgcheck=1 + enabled=1 + gpgkey=https://nginx.org/keys/nginx_signing.key + module_hotfixes=true + + [nginx-mainline] + name=nginx mainline repo + baseurl=http://nginx.org/packages/mainline/amzn/2023/$basearch/ + gpgcheck=1 + enabled=0 + gpgkey=https://nginx.org/keys/nginx_signing.key + module_hotfixes=true + ``` + + By default, the repository for `stable`
nginx packages is used. If you would like to use `mainline` nginx packages, run the following command: + + ```shell + sudo yum-config-manager --enable nginx-mainline + ``` + +3. Install nginx: + + ```shell + sudo yum install nginx + ``` + + When prompted to accept the GPG key, verify that the fingerprint matches: + `8540 A6F1 8833 A80E 9C16 53A4 2FD2 1310 B49F 6B46`, + `573B FD6B 3D8F BC64 1079 A6AB ABF5 BD82 7BD9 BF62`, + `9E9B E90E ACBC DE69 FE9B 204C BCDC D8A3 8D88 A2B3`, + and if so, accept it. + + +## Compiling and Installing from Source + +Compiling NGINX Open Source from source affords more flexibility than prebuilt packages: you can add particular modules (from NGINX or third parties), and apply latest security patches. + + +### Installing NGINX Dependencies + +Prior to compiling NGINX Open Source from source, you need to install libraries for its dependencies: + +- [PCRE](http://pcre.org/) – Supports regular expressions. Required by the NGINX [Core](https://nginx.org/en/docs/ngx_core_module.html) and [Rewrite](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html) modules. + + ```shell + wget github.com/PCRE2Project/pcre2/releases/download/pcre2-10.42/pcre2-10.42.tar.gz + tar -zxf pcre2-10.42.tar.gz + cd pcre2-10.42 + ./configure + make + sudo make install + ``` + +- [zlib](http://www.zlib.net/) – Supports header compression. Required by the NGINX [Gzip](https://nginx.org/en/docs/http/ngx_http_gzip_module.html) module. + + ```shell + wget http://zlib.net/zlib-1.2.13.tar.gz + tar -zxf zlib-1.2.13.tar.gz + cd zlib-1.2.13 + ./configure + make + sudo make install + ``` + +- [OpenSSL](https://www.openssl.org/) – Supports the HTTPS protocol. Required by the NGINX [SSL](https://nginx.org/en/docs/http/ngx_http_ssl_module.html) module and others. 
+ + ```shell + wget http://www.openssl.org/source/openssl-1.1.1v.tar.gz + tar -zxf openssl-1.1.1v.tar.gz + cd openssl-1.1.1v + ./Configure darwin64-x86_64-cc --prefix=/usr + make + sudo make install + ``` + + +### Downloading the Sources + +Download the source files for both the stable and mainline versions from [**nginx.org**](https://www.nginx.org/en/download.html). + +To download and unpack the source for the latest _mainline_ version, run: + +```shell +wget https://nginx.org/download/nginx-1.27.0.tar.gz +tar zxf nginx-1.27.0.tar.gz +cd nginx-1.27.0 +``` + +To download and unpack source files for the latest _stable_ version, run: + +```shell +wget https://nginx.org/download/nginx-1.26.1.tar.gz +tar zxf nginx-1.26.1.tar.gz +cd nginx-1.26.1 +``` + + +### Configuring the Build Options + +Configure options are specified with the `./configure` script that sets up various NGINX parameters, including paths to source and configuration files, compiler options, connection processing methods, and the list of modules. The script finishes by creating the `Makefile` required to compile the code and install NGINX Open Source. + +An example of options to the `configure` script (should be typed as a single line): + +```shell +./configure +--sbin-path=/usr/local/nginx/nginx +--conf-path=/usr/local/nginx/nginx.conf +--pid-path=/usr/local/nginx/nginx.pid +--with-pcre=../pcre2-10.42 +--with-zlib=../zlib-1.2.13 +--with-http_ssl_module +--with-stream +--with-mail=dynamic +--add-module=/usr/build/nginx-rtmp-module +--add-dynamic-module=/usr/build/3party_module +``` + + +#### Configuring NGINX Paths + +The `configure` script allows you to set paths to NGINX binary and configuration files, and to dependent libraries such as PCRE or SSL, in order to link them statically to the NGINX binary. 
+ + + +{{}} + +|Parameter | Description | +| ---| --- | +|``--prefix=`` | Directory for NGINX files, and the base location for all relative paths set by the other `configure` script options (excluding paths to libraries) and for the path to the **nginx.conf** configuration file. Default: **/usr/local/nginx**. | +|``--sbin-path=`` | Name of the NGINX executable file, which is used only during installation. Default: **/sbin/nginx** | +|``--conf-path=`` | Name of the NGINX configuration file. You can, however, always override this value at startup by specifying a different file with the ``-c `` option on the `nginx` command line. Default: **conf/nginx.conf** | +|``--pid-path=`` | Name of the **nginx.pid** file, which stores the process ID of the `nginx` master process. After installation, the path to the filename can be changed with the [pid](https://nginx.org/en/docs/ngx_core_module.html#pid) directive in the NGINX configuration file. Default: **/logs/nginx.pid** | +|``--error-log-path=`` | Name of the primary log file for errors, warnings, and diagnostic data. After installation, the filename can be changed with the [error_log](https://nginx.org/en/docs/ngx_core_module.html#error_log) directive in the NGINX configuration file. Default: **/logs/error.log** | +|``--http-log-path=`` | Name of the primary log file for requests to the HTTP server. After installation, the filename can always be changed with the [access_log](https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log) directive in the NGINX configuration file. Default: **/logs/access.log** | +|``--user=`` | Name of the unprivileged user whose credentials are used by the NGINX worker processes. After installation, the name can be changed with the [user](https://nginx.org/en/docs/ngx_core_module.html#user) directive in the NGINX configuration file. Default: ``nobody`` | +|``--group=`` | Name of the group whose credentials are used by the NGINX worker processes.
After installation, the name can be changed with the [user](https://nginx.org/en/docs/ngx_core_module.html#user) directive in the NGINX configuration file. Default: the value set by the ``--user`` option. | +|``--with-pcre=`` | Path to the source for the PCRE library, which is required for regular expressions support in the [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) directive and the [Rewrite](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html) module. | +|``--with-pcre-jit`` | Builds the PCRE library with “just-in-time compilation” support (the [pcre_jit](https://nginx.org/en/docs/ngx_core_module.html#pcre_jit) directive). | +|``--with-zlib=`` | Path to the source for the `zlib` library, which is required by the [Gzip](https://nginx.org/en/docs/http/ngx_http_gzip_module.html) module. | + +{{}} + + + +#### Configuring NGINX GCC Options + +With the `configure` script you can also specify compiler‑related options. + + +{{}} + +|Parameter | Description | +| ---| --- | +|``--with-cc-opt=""`` | Additional parameters that are added to the ``CFLAGS`` variable. When using the system PCRE library under FreeBSD, the mandatory value is ``--with-cc-opt="-I /usr/local/include"``. If the number of files supported by ``select()`` needs to be increased, it can also specified here as in this example: ``--with-cc-opt="-D FD_SETSIZE=2048"``. | +|``--with-ld-opt=""`` | Additional parameters that are used during linking. When using the system PCRE library under FreeBSD, the mandatory value is ``--with-ld-opt="-L /usr/local/lib"``. | + +{{}} + + + +#### Specifying NGINX Connection Processing Methods + +With the `configure` script you can redefine the method for event‑based polling. For more information, see [Connection processing methods](https://nginx.org/en/docs/events.html) in the NGINX reference documentation. 
+ + +{{}} + +|Module Name | Description | +| ---| --- | +|``--with-select_module``, ``--without-select_module`` | Enables or disables building a module that enables NGINX to work with the ``select()`` method. The module is built automatically if the platform does not appear to support more appropriate methods such as `kqueue`, `epoll`, or `/dev/poll`. | +|``--with-poll_module``, ``--without-poll_module`` | Enables or disables building a module that enables NGINX to work with the ``poll()`` method. The module is built automatically if the platform does not appear to support more appropriate methods such as `kqueue`, `epoll`, or `/dev/poll`. | + +{{}} + + + + +#### Selecting the NGINX Modules to Build + +NGINX consists of a set of function‑specific _modules_, which are specified with `configure` script along with other build options. + +Some modules are built by default – they do not have to be specified with the `configure` script. Default modules can however be explicitly excluded from the NGINX binary with the `--without-` option on the `configure` script. + +Modules not included by default, as well as third‑party modules, must be explicitly specified in the `configure` script together with other build options. Such modules can be linked to NGINX binary either _statically_ (they are then loaded each time NGINX starts) or _dynamically_ (they are loaded only if associated directives are included in the NGINX configuration file).
+ + +#### Modules Built by Default + +If you do not need a module that is built by default, you can disable it by naming it with the `--without-` option on the `configure` script, as in this example which disables the [Empty GIF](https://nginx.org/en/docs/http/ngx_http_empty_gif_module.html) module (should be typed as a single line): + +```shell +./configure +--sbin-path=/usr/local/nginx/nginx +--conf-path=/usr/local/nginx/nginx.conf +--pid-path=/usr/local/nginx/nginx.pid +--with-http_ssl_module +--with-stream +--with-pcre=../pcre2-10.42 +--with-zlib=../zlib-1.2.13 +--without-http_empty_gif_module +``` + + + +{{}} + +|Module Name | Description | +| ---| --- | +|[http_access_module](https://nginx.org/en/docs/http/ngx_http_access_module.html) | Accepts or denies requests from specified client addresses. | +|[http_auth_basic_module](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html) | Limits access to resources by validating the user name and password using the HTTP Basic Authentication protocol. | +|[http_autoindex_module](https://nginx.org/en/docs/http/ngx_http_autoindex_module.html) | Processes requests ending with the forward-slash character (_/_) and produces a directory listing. | +|[http_browser_module](https://nginx.org/en/docs/http/ngx_http_browser_module.html) | Creates variables whose values depend on the value of the ``User-Agent`` request header. | +|[http_charset_module](https://nginx.org/en/docs/http/ngx_http_charset_module.html) | Adds the specified character set to the ``Content-Type`` response header. Can convert data from one character set to another. | +|[http_empty_gif_module](https://nginx.org/en/docs/http/ngx_http_empty_gif_module.html) | Emits a single-pixel transparent GIF. | +|[http_fastcgi_module](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html) | Passes requests to a FastCGI server. 
| +|[http_geo_module](https://nginx.org/en/docs/http/ngx_http_geo_module.html) | Creates variables with values that depend on the client IP address. | +|[http_gzip_module](https://nginx.org/en/docs/http/ngx_http_gzip_module.html) | Compresses responses using `gzip`, reducing the amount of transmitted data by half or more. | +|[http_limit_conn_module](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html) | Limits the number of connections per a defined key, in particular, the number of connections from a single IP address. | +|[http_limit_req_module](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html) | Limits the request processing rate per a defined key, in particular, the processing rate of requests coming from a single IP address. | +|[http_map_module](https://nginx.org/en/docs/http/ngx_http_map_module.html) | Creates variables whose values depend on the values of other variables. | +|[http_memcached_module](https://nginx.org/en/docs/http/ngx_http_memcached_module.html) | Passes requests to a memcached server. | +|[http_proxy_module](https://nginx.org/en/docs/http/ngx_http_proxy_module.html) | Passes HTTP requests to another server. | +|[http_referer_module](https://nginx.org/en/docs/http/ngx_http_referer_module.html) | Blocks requests with invalid values in the `Referer` header. | +|[http_rewrite_module](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html) | Changes the request URI using regular expressions and return redirects; conditionally selects configurations. Requires the [PCRE](http://pcre.org/) library. | +|[http_scgi_module](https://nginx.org/en/docs/http/ngx_http_scgi_module.html) | Passes requests to an SCGI server. | +|[http_ssi_module](https://nginx.org/en/docs/http/ngx_http_ssi_module.html) | Processes SSI (Server Side Includes) commands in responses passing through it. 
| +|[http_split_clients_module](https://nginx.org/en/docs/http/ngx_http_split_clients_module.html) | Creates variables suitable for A/B testing, also known as split testing. | +|[http_upstream_hash_module](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#hash) | Enables the generic Hash load-balancing method. | +|[http_upstream_ip_hash_module](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ip_hash) | Enables the IP Hash load-balancing method. | +|[http_upstream_keepalive_module](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive) | Enables keepalive connections. | +|[http_upstream_least_conn_module](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#least_conn) | Enables the Least Connections load-balancing method. | +|[http_upstream_zone_module](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) | Enables shared memory zones. | +|[http_userid_module](https://nginx.org/en/docs/http/ngx_http_userid_module.html) | Sets cookies suitable for client identification. | +|[http_uwsgi_module](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html) | Passes requests to a uwsgi server. | + +{{}} + + + +#### Including Modules Not Built by Default + +Many NGINX modules are not built by default, and must be listed on the `configure` command line to be built. + +The [mail](https://nginx.org/en/docs/mail/ngx_mail_core_module.html), [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html), [geoip](https://nginx.org/en/docs/http/ngx_http_geoip_module.html), [image_filter](https://nginx.org/en/docs/http/ngx_http_image_filter_module.html), [perl](https://nginx.org/en/docs/http/ngx_http_perl_module.html) and [xslt](https://nginx.org/en/docs/http/ngx_http_xslt_module.html) modules can be compiled as dynamic. See [Dynamic Modules]({{< relref "../dynamic-modules/dynamic-modules.md" >}}) for details. 
+ +An example of the `configure` command that includes nondefault modules (should be typed as a single line): + +```shell +./configure +--sbin-path=/usr/local/nginx/nginx +--conf-path=/usr/local/nginx/nginx.conf +--pid-path=/usr/local/nginx/nginx.pid +--with-pcre=../pcre2-10.42 +--with-zlib=../zlib-1.2.13 +--with-http_ssl_module +--with-stream +--with-mail +``` + +{{}} + +|Module Name | Description | +| ---| --- | + +{{}} + +- - ``--with-cpp_test_module`` + - Tests the C++ compatibility of header files. + +- - ``--with-debug`` + - Enables the [debugging log](https://docs.nginx.com/nginx/admin-guide/monitoring/debugging/). + +- - ``--with-file-aio`` + - Enables asynchronous I/O. + +- - ``--`` [with-google_perftools_module](https://nginx.org/en/docs/ngx_google_perftools_module.html) + - Allows using [Google Performance tools](https://github.com/gperftools/gperftools) library. + +- - ``--`` [with-http_addition_module](https://nginx.org/en/docs/http/ngx_http_addition_module.html) + - Adds text before and after a response. + +- - ``--`` [with-http_auth_request_module](https://nginx.org/en/docs/http/ngx_http_auth_request_module.html) + - Implements client authorization based on the result of a subrequest. + +- - ``--`` [with-http_dav_module](https://nginx.org/en/docs/http/ngx_http_dav_module.html) + - Enables file management automation using the WebDAV protocol. + +- - ``--with-http_degradation_module`` + - Allows returning an error when a memory size exceeds the defined value. + +- - ``--`` [with-http_flv_module](https://nginx.org/en/docs/http/ngx_http_flv_module.html) + - Provides pseudo-streaming server-side support for Flash Video (FLV) files. + +- - ``--`` [with-http_geoip_module](https://nginx.org/en/docs/http/ngx_http_geoip_module.html) + - Enables creating variables whose values depend on the client IP address. The module uses [MaxMind](http://www.maxmind.com) GeoIP databases. To compile as a separate [dynamic module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/dynamic-modules/) instead, change the option to `--with-http_geoip_module=dynamic`. 
+ +- - ``--`` [with-http_gunzip_module](https://nginx.org/en/docs/http/ngx_http_gunzip_module.html) + - Decompresses responses with `Content-Encoding: gzip` for clients that do not support the _zip_ encoding method. + +- - ``--`` [with-http_gzip_static_module](https://nginx.org/en/docs/http/ngx_http_gzip_static_module.html) + - Allows sending precompressed files with the **.gz** filename extension instead of regular files. + +- - ``--`` [with-http_image_filter_module](https://nginx.org/en/docs/http/ngx_http_image_filter_module.html) + - Transforms images in JPEG, GIF, and PNG formats. The module requires the [LibGD](http://libgd.github.io/) library. To compile as a separate [dynamic module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/dynamic-modules/) instead, change the option to ``--with-http_image_filter_module=dynamic``. + +- - ``--`` [with-http_mp4_module](https://nginx.org/en/docs/http/ngx_http_mp4_module.html) + - Provides pseudo-streaming server-side support for MP4 files. + +- - ``--`` [with-http_perl_module](https://nginx.org/en/docs/http/ngx_http_perl_module.html) + - Used to implement location and variable handlers in Perl and insert Perl calls into SSI. Requires the [PERL](https://www.perl.org/get.html) library. To compile as a separate [dynamic module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/dynamic-modules/) instead, change the option to ``--with-http_perl_module=dynamic``. + +- - ``--`` [with-http_random_index_module](https://nginx.org/en/docs/http/ngx_http_random_index_module.html) + - Processes requests ending with the slash character (‘/’) and picks a random file in a directory to serve as an index file. + +- - ``--`` [with-http_realip_module](https://nginx.org/en/docs/http/ngx_http_realip_module.html) + - Changes the client address to the one sent in the specified header field. 
+ +- - ``--`` [with-http_secure_link_module](https://nginx.org/en/docs/http/ngx_http_secure_link_module.html) + - Used to check authenticity of requested links, protect resources from unauthorized access, and limit link lifetime. + +- - ``--`` [with-http_slice_module](https://nginx.org/en/docs/http/ngx_http_slice_module.html) + - Allows splitting a request into subrequests, where each subrequest returns a certain range of the response. Provides more effective caching of large files. + +- - ``--`` [with-http_ssl_module](https://nginx.org/en/docs/http/ngx_http_ssl_module.html) + - Enables HTTPS support. Requires an SSL library such as [OpenSSL](https://www.openssl.org/). + +- - ``--`` [with-http_stub_status_module](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html) + - Provides access to basic status information. Note that NGINX Plus customers do not require this module as they are already provided with extended status metrics and interactive dashboard. + +- - ``--`` [with-http_sub_module](https://nginx.org/en/docs/http/ngx_http_sub_module.html) + - Modifies a response by replacing one specified string with another. + +- - ``--`` [with-http_xslt_module](https://nginx.org/en/docs/http/ngx_http_xslt_module.html) + - Transforms XML responses using one or more XSLT stylesheets. The module requires the [Libxml2](http://xmlsoft.org/) and [XSLT](http://xmlsoft.org/XSLT/) libraries. To compile as a separate [dynamic module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/dynamic-modules/) instead, change the option to ``--with-http_xslt_module=dynamic``. + +- - ``--`` [with-http_v2_module](https://nginx.org/en/docs/http/ngx_http_v2_module.html) + - Enables support for [HTTP/2](https://datatracker.ietf.org/doc/html/rfc7540). See [The HTTP/2 Module in NGINX](https://www.nginx.com/blog/http2-module-nginx/) on the NGINX blog for details. + +- - ``--`` [with-mail](https://nginx.org/en/docs/mail/ngx_mail_core_module.html) + - Enables mail proxy functionality. 
To compile as a separate [dynamic module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/dynamic-modules/) instead, change the option to ``--with-mail=dynamic``. + +- - ``--`` [with-mail_ssl_module](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html) + - Provides support for a mail proxy server to work with the SSL/TLS protocol. Requires an SSL library such as [OpenSSL](https://www.openssl.org/). + +- - ``--`` [with-stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html) + - Enables the TCP and UDP proxy functionality. To compile as a separate [dynamic module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/dynamic-modules/) instead, change the option to ``--with-stream=dynamic``. + +- - ``--`` [with-stream_ssl_module](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html) + - Provides support for a stream proxy server to work with the SSL/TLS protocol. Requires an SSL library such as [OpenSSL](https://www.openssl.org/). + +- - ``--with-threads`` + - Enables NGINX to use thread pools. For details, see [Thread Pools in NGINX Boost Performance 9x!](https://www.nginx.com/blog/thread-pools-boost-performance-9x/) on the NGINX blog. + +#### Including Third-Party Modules + +You can extend NGINX functionality by compiling NGINX Open Source with your own module or a third‑party module. Some third‑party modules are listed in the [NGINX Wiki](https://nginx.com/resources/wiki/modules/). Use third‑party modules at your own risk as their stability is not guaranteed. + + +##### Statically Linked Modules + +Most modules built into NGINX Open Source are _statically linked_: they are built into NGINX Open Source at compile time and are linked to the NGINX binary statically. These modules can be disabled only by recompiling NGINX. 
+ +To compile NGINX Open Source with a statically linked third‑party module, include the `--add-module=<PATH>` option on the `configure` command, where `<PATH>` is the path to the source code (this example is for the [RTMP](https://github.com/arut/nginx-rtmp-module) module): + +```shell +./configure ... --add-module=/usr/build/nginx-rtmp-module +``` + +##### Dynamically Linked Modules + +NGINX modules can also be compiled as a shared object (**\*.so** file) and then dynamically loaded into NGINX Open Source at runtime. This provides more flexibility, as the module can be loaded or unloaded at any time by adding or removing the associated [load_module](https://nginx.org/en/docs/ngx_core_module.html#load_module) directive in the NGINX configuration file and reloading the configuration. Note that the module itself must support dynamic linking. + +To compile NGINX Open Source with a dynamically loaded third‑party module, include the `--add-dynamic-module=<PATH>` option on the `configure` command, where `<PATH>` is the path to the source code: + + +```shell +./configure ... --add-dynamic-module=<PATH> +``` + +The resulting **\*.so** files are written to the _prefix_**/modules/** directory, where the _prefix_ is a directory for server files such as **/usr/local/nginx/**. + +To load a dynamic module, add the [load_module](https://nginx.org/en/docs/ngx_core_module.html#load_module) directive to the NGINX configuration after installation: + +```nginx +load_module modules/ngx_mail_module.so; +``` + +For more information, see [Compiling Third‑Party Dynamic Modules for NGINX and NGINX Plus](https://www.nginx.com/blog/compiling-dynamic-modules-nginx-plus/) on the NGINX blog and [Extending NGINX](https://nginx.com/resources/wiki/extending/) in the Wiki. 
+ + +### Completing the Installation from Source + +- Compile and install the build: + + ```shell + make + sudo make install + ``` + +- After the installation is finished, start NGINX Open Source: + + ```shell + sudo nginx + ``` diff --git a/content/nginx/admin-guide/installing-nginx/installing-nginx-plus-amazon-web-services.md b/content/nginx/admin-guide/installing-nginx/installing-nginx-plus-amazon-web-services.md new file mode 100644 index 000000000..2090a080f --- /dev/null +++ b/content/nginx/admin-guide/installing-nginx/installing-nginx-plus-amazon-web-services.md @@ -0,0 +1,47 @@ +--- +description: Install F5 NGINX Plus on Amazon Web Services (AWS), to provide sophisticated + Layer 7 load balancing for your apps running on Amazon Linux, RHEL, and Ubuntu. +docs: DOCS-411 +doctypes: +- task +title: Installing NGINX Plus AMIs on Amazon EC2 +toc: true +weight: 300 +--- + +NGINX, Inc. participates in the Amazon Web Services (AWS) Partner Network as a Standard Technology Partner. We offer Amazon Machine Images (AMIs) for use in the Amazon Elastic Compute Cloud (EC2), available at the AWS Marketplace for several operating systems, including Amazon Linux, Red Hat Enterprise Linux, and Ubuntu. + +The AMIs contain the following components: + +- Latest version of [NGINX Plus](https://www.f5.com/products/nginx/nginx-plus), optimized for use on Amazon EC2 +- Pre-packaged software for building highly available (HA) NGINX Plus configurations + +## Installing the F5 NGINX Plus AMI + +To quickly set up an NGINX Plus environment on AWS: + +1. Follow the instructions in [Getting Started with Amazon EC2 Linux Instances](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EC2_GetStarted.html) to sign up on AWS and get more information about EC2 itself. +2. Proceed to the product page for the appropriate AMI at the AWS Marketplace, and launch the AMI. 
+ + - [NGINX Plus – Amazon Linux AMI HVM](https://aws.amazon.com/marketplace/seller-profile?id=741df81b-dfdc-4d36-b8da-945ea66b522c) + - [NGINX Plus – Red Hat Enterprise Linux 7 AMI HVM](https://aws.amazon.com/marketplace/seller-profile?id=741df81b-dfdc-4d36-b8da-945ea66b522c) + - [NGINX Plus – Red Hat Enterprise Linux 6 AMI HVM](https://aws.amazon.com/marketplace/seller-profile?id=741df81b-dfdc-4d36-b8da-945ea66b522c) + - [NGINX Plus – Ubuntu AMI HVM](https://aws.amazon.com/marketplace/seller-profile?id=741df81b-dfdc-4d36-b8da-945ea66b522c) + + Click the **Continue to Subscribe** button to proceed to the **Launch on EC2** page. + +3. Select the type of launch by clicking the appropriate tab (1‑Click Launch, **Manual Launch**, or **Service Catalog**). Choose the desired options for billing, instance size, and so on, and click the Accept Software Terms… button. +4. When configuring the firewall rules, add a rule to accept web traffic on TCP ports 80 and 443 (this happens automatically if you launch from the 1-Click Launch tab). +5. As soon as the new EC2 instance launches, NGINX Plus starts automatically and serves a default **index.html** page. To view the page, use a web browser to access the public DNS name of the new instance. You can also check the status of the NGINX Plus server by logging into the EC2 instance and running this command: + + ```nginx + /etc/init.d/nginx status + ``` + +See [NGINX Plus on the AWS Cloud Quick Start](https://aws.amazon.com/quickstart/architecture/nginx-plus/) deployment guide for details. + +## What If I Need Help? + +If you encounter any problems with NGINX Plus configuration, documentation is available at [nginx.org](https://nginx.org/en/docs/) and in the [NGINX Plus Admin Guide]({{< relref "/nginx/admin-guide/installing-nginx/" >}}). + +Customers who purchase an NGINX Plus AMI at the AWS Marketplace are eligible for the AWS support provided by the NGINX, Inc. engineering team. 
To activate support, submit the [AMI Support Activation](https://www.nginx.com/ami-support-activation/) form (you need your AWS account number). When you request support, we’ll ask you to provide the AWS account number that you registered, along with the IDs of your EC2 instances in some cases. diff --git a/content/nginx/admin-guide/installing-nginx/installing-nginx-plus-google-cloud-platform.md b/content/nginx/admin-guide/installing-nginx/installing-nginx-plus-google-cloud-platform.md new file mode 100644 index 000000000..77e6ab8c4 --- /dev/null +++ b/content/nginx/admin-guide/installing-nginx/installing-nginx-plus-google-cloud-platform.md @@ -0,0 +1,63 @@ +--- +description: Install F5 NGINX Plus on the Google Cloud Platform, to provide sophisticated + Layer 7 load balancing for your apps running on Google Compute Engine. +docs: DOCS-412 +doctypes: +- task +title: Installing NGINX Plus on the Google Cloud Platform +toc: true +weight: 400 +--- + +[NGINX Plus](https://www.f5.com/products/nginx/nginx-plus), the high‑performance application delivery platform, load balancer, and web server, is available on the Google Cloud Platform as a virtual machine (VM) image. The VM image contains the latest version of NGINX Plus, optimized for use with the Google Cloud Platform Compute Engine. + + +## Installing the NGINX Plus VM + +To quickly set up an NGINX Plus environment on the Google Cloud Platform, perform the following steps. + +> **Note:** The Google Cloud Platform interface is under active development. Although we make every effort to provide accurate instructions, they are subject to change. Also, the exact options you see depend on whether or not you have existing projects. + +1. Access the [NGINX Plus page](https://console.cloud.google.com/marketplace/details/nginx-public/nginx-plus) in Google Marketplace and click the **LAUNCH ON COMPUTE ENGINE** button. + + screenshot F5 NGINX Plus on Google Marketplace + +2. The **Select or create a project** window opens. 
Enter a project name and select a value from the **Organization** drop‑down, then click the **Create** button. + + screenshot nginx plus to google computer engine + +3. In the **Configure & Deploy** window, enter or select appropriate values for zone, machine type, and so on. Click the **Deploy** button. + + > **Note:** In the **Firewall** section, be sure the **Allow HTTP traffic** checkbox is checked. For more information on controlling incoming traffic, see the [Firewall Rules Overview](https://cloud.google.com/vpc/docs/firewalls) in the Google Cloud Platform documentation. + + Screenshot New NGINX Plus Deployment + +4. Click the **Create** button. The Google Developers Console confirms that NGINX Plus was deployed. + + Screenshot NGINX plus deployed on Google Cloud Platform + +As soon as the project deploys and the new virtual machine (VM) instance starts running, NGINX Plus starts automatically and serves a default **index.html** page. To verify that NGINX Plus is working properly, use a web browser to access the public DNS name of the new VM and view the page. + +You can also check the status of the NGINX Plus server by logging into the VM and running this command: + +```shell +/etc/init.d/nginx status +``` + + +## What If I Need Help? + +If you encounter any problems with NGINX Plus configuration, documentation is available at [nginx.org](https://nginx.org/en/docs/) and in the [NGINX Plus Admin Guide]({{< relref "/nginx/admin-guide/installing-nginx/" >}}). + +Customers who purchase an NGINX Plus VM image on the Google Cloud Platform are eligible for the Google Cloud Platform VM support provided by the NGINX, Inc. engineering team. To activate support, submit the [Google Cloud Platform Support Activation](https://www.nginx.com/gcp-support-activation/) form. + + +### Accessing the Open Source Licenses for NGINX Plus + +NGINX Plus includes open source software written by NGINX, Inc. and other contributors. 
The text of the open source licenses is provided in Appendix B of the _NGINX Plus Reference Guide_. To access the guide included with the NGINX Plus VM instance, run this command: + +```shell +less /usr/share/nginx/html/nginx-modules-reference.pdf +``` + +The _NGINX Plus Reference Guide_ is also [available online](http://www.nginx.com/wp-content/uploads/2023/08/nginx-modules-reference.pdf). diff --git a/content/nginx/admin-guide/installing-nginx/installing-nginx-plus-microsoft-azure.md b/content/nginx/admin-guide/installing-nginx/installing-nginx-plus-microsoft-azure.md new file mode 100644 index 000000000..d54dc5d0a --- /dev/null +++ b/content/nginx/admin-guide/installing-nginx/installing-nginx-plus-microsoft-azure.md @@ -0,0 +1,37 @@ +--- +description: Install F5 NGINX Plus as a virtual machine image on Microsoft Azure, to + provide sophisticated Layer 7 load balancing for your apps. +docs: DOCS-413 +doctypes: +- task +title: Installing NGINX Plus on Microsoft Azure +toc: true +weight: 500 +--- + +[F5 NGINX Plus](https://www.f5.com/products/nginx/nginx-plus), the high-performance application delivery platform, load balancer, and web server, is available at the Microsoft Azure Marketplace as a virtual machine (VM) image. + +The VM image contains the latest version of NGINX Plus, optimized for use with Azure. + +## Installing the NGINX Plus VM + +To quickly set up an NGINX Plus environment on Microsoft Azure: + +1. Follow the instructions in [Create a Virtual Machine Running Linux](https://learn.microsoft.com/en-us/azure/virtual-machines/linux/quick-create-portal) to sign up on Azure and get more information about Azure itself. +2. Search for “NGINX Plus” in the [Azure Marketplace](https://azure.microsoft.com/en-us/marketplace/), open the VM image, and follow the installation instructions. + +3. 
Create an Azure _availability set_ of two or more NGINX Plus virtual machines, which adds redundancy to your NGINX Plus setup by ensuring that at least one virtual machine remains available during a planned or unplanned maintenance event on the Azure platform. For more information, see [Manage the availability of Linux virtual machines](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/manage-availability?) in the Azure documentation. + +4. Create _endpoints_ to enable clients outside the NGINX Plus virtual machine’s cloud or virtual network to access it. Sign in to the Azure Management Portal and add endpoints manually to handle the inbound network traffic on port 80 (HTTP) and port 443 (HTTPS). For more information, see [How to set up endpoints on a Linux classic virtual machine in Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/setup-endpoints) in the Azure documentation. + +5. As soon as the new virtual machine launches, NGINX Plus starts automatically and serves a default **index.html** page. To verify that NGINX Plus is working properly, use a web browser to access the public DNS name of the new virtual machine and view the page. You can also check the status of the NGINX Plus server by logging into the virtual machine and running this command: + + ```none + /etc/init.d/nginx status + ``` + +## What If I Need Help? + +If you encounter any problems with NGINX Plus configuration, documentation is available at [nginx.org](https://nginx.org/en/docs/) and in the [NGINX Plus Admin Guide]({{< relref "/nginx/admin-guide/installing-nginx/" >}}). + +Customers who purchase an NGINX Plus VM image at the Azure Marketplace are eligible for the Azure VM support provided by the NGINX, Inc. engineering team. To activate support, submit the [Azure support activation](https://www.nginx.com/azure-support-activation/) form (you need your Azure subscription ID). 
When you request support, we’ll ask you to provide the Azure subscription ID that you registered, along with the deployment IDs of your Azure virtual machines in some cases. diff --git a/content/nginx/admin-guide/installing-nginx/installing-nginx-plus.md b/content/nginx/admin-guide/installing-nginx/installing-nginx-plus.md new file mode 100644 index 000000000..6695f2d2b --- /dev/null +++ b/content/nginx/admin-guide/installing-nginx/installing-nginx-plus.md @@ -0,0 +1,928 @@ +--- +description: Install and upgrade F5 NGINX Plus with step-by-step instructions for the + base package and dynamic modules on all supported Linux distributions. +docs: DOCS-414 +doctypes: +- task +title: Installing NGINX Plus +toc: true +weight: 100 +--- + +This article explains how to install NGINX Plus on different operating systems, upgrade existing NGINX Plus installation, install and enable dynamic modules, install in rootless mode or when offline. + +## Prerequisites {#prereq} + +- An NGINX Plus subscription (purchased or trial) +- Credentials to the [MyF5 Customer Portal](https://account.f5.com/myf5), provided by email from F5, Inc. +- A [supported operating system]({{< relref "nginx/technical-specs.md" >}}) +- `root` privilege + +## Install NGINX Plus on Amazon Linux 2023 {#install_amazon2023} + +1. {{< include "nginx-plus/install/check-tech-specs.md" >}} + +1. {{< include "nginx-plus/install/back-up-config-and-logs.md" >}} + +1. {{< include "licensing-and-reporting/download-jwt-crt-from-myf5.md" >}} + +1. {{< include "nginx-plus/install/create-dir-for-jwt.md" >}} + +1. {{< include "nginx-plus/install/copy-jwt-to-etc-nginx-dir.md" >}} + +1. {{< include "nginx-plus/install/install-ca-certificates-dependency-dnf.md" >}} + +1. {{< include "nginx-plus/install/create-dir-for-crt-key.md" >}} + +1. {{< include "nginx-plus/install/copy-crt-and-key.md" >}} + +1. Add the NGINX Plus repository to your Amazon Linux 2023 instance. 
Download the [plus-amazonlinux2023.repo](https://cs.nginx.com/static/files/plus-amazonlinux2023.repo) file to **/etc/yum.repos.d**: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/plus-amazonlinux2023.repo + ``` + +1. {{< include "nginx-plus/install/install-nginx-plus-package-dnf.md" >}} + +1. {{< include "nginx-plus/install/check-nginx-binary-version.md" >}} + +1. {{< include "nginx-plus/install/configure-usage-reporting.md" >}} + +1. {{< include "nginx-plus/install/install-nginx-agent-for-nim.md" >}} + +## Install NGINX Plus on Amazon Linux 2 {#install_amazon2} + +1. {{< include "nginx-plus/install/check-tech-specs.md" >}} + +1. {{< include "nginx-plus/install/back-up-config-and-logs.md" >}} + +1. {{< include "licensing-and-reporting/download-jwt-crt-from-myf5.md" >}} + +1. {{< include "nginx-plus/install/create-dir-for-jwt.md" >}} + +1. {{< include "nginx-plus/install/copy-jwt-to-etc-nginx-dir.md" >}} + +1. {{< include "nginx-plus/install/install-ca-certificates-dependency-yum.md" >}} + +1. {{< include "nginx-plus/install/create-dir-for-crt-key.md" >}} + +1. {{< include "nginx-plus/install/copy-crt-and-key.md" >}} + +1. Add the NGINX Plus repository to your Amazon Linux 2 instance. Download the [nginx-plus-amazon2.repo](https://cs.nginx.com/static/files/nginx-plus-amazon2.repo) file to **/etc/yum.repos.d**: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-amazon2.repo + ``` + +1. {{< include "nginx-plus/install/install-nginx-plus-package-yum.md" >}} + +1. {{< include "nginx-plus/install/check-nginx-binary-version.md" >}} + +1. {{< include "nginx-plus/install/configure-usage-reporting.md" >}} + +1. 
{{< include "nginx-plus/install/install-nginx-agent-for-nim.md" >}} + +## Install NGINX Plus on RHEL 7.4+, CentOS 7.4+, and Oracle Linux 7.4+ {#install_rhel_centos} + +{{< call-out "important" "Deprecation notice" "" >}} +CentOS 7.4, RHEL 7.4, and Oracle Linux 7.4 are deprecated as of NGINX Plus Release 32 (R32) and are not supported in Release 33 (R33) or later. For the list of supported distributions, refer to the [NGINX Plus Tech Specs]({{< relref "nginx/technical-specs.md" >}}). +{{}} + +1. {{< include "nginx-plus/install/back-up-config-and-logs.md" >}} + +1. Download the SSL certificate and private key associated with your NGINX Plus subscription from the MyF5 Customer Portal: + + - Log in to [MyF5](https://my.f5.com/manage/s/). + - Go to **My Products & Plans > Subscriptions** to see your active subscriptions. + - Find your NGINX products or services subscription, and select the **Subscription ID** for details. + - Download the **nginx-repo.crt** and **nginx-repo.key** from the subscription page. + +1. {{< include "nginx-plus/install/install-ca-certificates-dependency-yum.md" >}} + +1. {{< include "nginx-plus/install/create-dir-for-crt-key.md" >}} + +1. {{< include "nginx-plus/install/copy-crt-and-key.md" >}} + +1. Add the NGINX Plus repository by downloading the [nginx-plus-7.4.repo](https://cs.nginx.com/static/files/nginx-plus-7.4.repo) file to **/etc/yum.repos.d**: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-7.4.repo + ``` + +
      + Learn how to pin NGINX Plus to a specific version + {{}}{{< include "nginx-plus/install/pin-to-version/pin-rhel7-R32.md" >}}{{}} +
      + +1. {{< include "nginx-plus/install/install-nginx-plus-package-yum.md" >}} + +1. {{< include "nginx-plus/install/enable-nginx-service-at-boot.md" >}} + +1. {{< include "nginx-plus/install/check-nginx-binary-version.md" >}} + +1. {{< include "nginx-plus/install/install-nginx-agent-for-nim.md" >}} + +## Install NGINX Plus on RHEL 8.1+, Oracle Linux 8.1+, AlmaLinux 8, Rocky Linux 8 {#install_rhel8} + +1. {{< include "nginx-plus/install/check-tech-specs.md" >}} + +1. {{< include "nginx-plus/install/back-up-config-and-logs.md" >}} + +1. {{< include "licensing-and-reporting/download-jwt-crt-from-myf5.md" >}} + +1. {{< include "nginx-plus/install/create-dir-for-jwt.md" >}} + +1. {{< include "nginx-plus/install/copy-jwt-to-etc-nginx-dir.md" >}} + +1. {{< include "nginx-plus/install/install-ca-certificates-dependency-dnf.md" >}} + +1. {{< include "nginx-plus/install/create-dir-for-crt-key.md" >}} + +1. {{< include "nginx-plus/install/copy-crt-and-key.md" >}} + +1. Add the NGINX Plus repository by downloading the [nginx-plus-8.repo](https://cs.nginx.com/static/files/nginx-plus-8.repo) file to **/etc/yum.repos.d**: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-8.repo + ``` + +
      + Learn how to pin NGINX Plus to a specific version + {{}}{{< include "nginx-plus/install/pin-to-version/pin-rhel8-R32.md" >}}{{}} +
      + +1. {{< include "nginx-plus/install/install-nginx-plus-package-dnf.md" >}} + +1. {{< include "nginx-plus/install/enable-nginx-service-at-boot.md" >}} + +1. {{< include "nginx-plus/install/check-nginx-binary-version.md" >}} + +1. {{< include "nginx-plus/install/configure-usage-reporting.md" >}} + +1. {{< include "nginx-plus/install/install-nginx-agent-for-nim.md" >}} + +## Install NGINX Plus on RHEL 9.0+, Oracle Linux 9, AlmaLinux 9, Rocky Linux 9 {#install_rhel} + +1. {{< include "nginx-plus/install/check-tech-specs.md" >}} + +1. {{< include "nginx-plus/install/back-up-config-and-logs.md" >}} + +1. {{< include "licensing-and-reporting/download-jwt-crt-from-myf5.md" >}} + +1. {{< include "nginx-plus/install/create-dir-for-jwt.md" >}} + +1. {{< include "nginx-plus/install/copy-jwt-to-etc-nginx-dir.md" >}} + +1. {{< include "nginx-plus/install/install-ca-certificates-dependency-dnf.md" >}} + +1. {{< include "nginx-plus/install/create-dir-for-crt-key.md" >}} + +1. {{< include "nginx-plus/install/copy-crt-and-key.md" >}} + +1. Add the NGINX Plus repository by downloading the [plus-9.repo](https://cs.nginx.com/static/files/plus-9.repo) file to **/etc/yum.repos.d**: + + ```shell + sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/plus-9.repo + ``` + +
      + Learn how to pin NGINX Plus to a specific version + {{}}{{< include "nginx-plus/install/pin-to-version/pin-rhel9-R32.md" >}}{{}} +
      + +1. {{< include "nginx-plus/install/install-nginx-plus-package-dnf.md" >}} + +1. {{< include "nginx-plus/install/enable-nginx-service-at-boot.md" >}} + +1. {{< include "nginx-plus/install/check-nginx-binary-version.md" >}} + +1. {{< include "nginx-plus/install/configure-usage-reporting.md" >}} + +1. {{< include "nginx-plus/install/install-nginx-agent-for-nim.md" >}} + +## Install NGINX Plus on Debian or Ubuntu {#install_debian_ubuntu} + +NGINX Plus can be installed on the following versions of Debian or Ubuntu: + +1. {{< include "nginx-plus/install/check-tech-specs.md" >}} + +1. {{< include "nginx-plus/install/back-up-config-and-logs.md" >}} + +1. {{< include "licensing-and-reporting/download-jwt-crt-from-myf5.md" >}} + +1. {{< include "nginx-plus/install/create-dir-for-jwt.md" >}} + +1. {{< include "nginx-plus/install/copy-jwt-to-etc-nginx-dir.md" >}} + +1. {{< include "nginx-plus/install/create-dir-for-crt-key.md" >}} + +1. {{< include "nginx-plus/install/copy-crt-and-key.md" >}} + +1. Install the prerequisites packages: + + - **For Debian**: + + ```shell + sudo apt update + sudo apt install apt-transport-https lsb-release ca-certificates wget gnupg2 debian-archive-keyring + ``` + + - **For Ubuntu**: + + ```shell + sudo apt update + sudo apt install apt-transport-https lsb-release ca-certificates wget gnupg2 ubuntu-keyring + ``` + +1. Download and add NGINX signing key: + + ```shell + wget -qO - https://cs.nginx.com/static/keys/nginx_signing.key \ + | gpg --dearmor \ + | sudo tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null + +1. 
Add the NGINX Plus repository: + + - **For Debian**: + + ```shell + printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ + https://pkgs.nginx.com/plus/debian `lsb_release -cs` nginx-plus\n" \ + | sudo tee /etc/apt/sources.list.d/nginx-plus.list + ``` + + - **For Ubuntu**: + + ```shell + printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ + https://pkgs.nginx.com/plus/ubuntu `lsb_release -cs` nginx-plus\n" \ + | sudo tee /etc/apt/sources.list.d/nginx-plus.list + ``` + +
      + Learn how to pin NGINX Plus to a specific version + {{}}{{< include "nginx-plus/install/pin-to-version/pin-debian-ubuntu-R32.md" >}}{{}} +
      + +1. Download the **nginx-plus** apt configuration to **/etc/apt/apt.conf.d**: + + ```shell + sudo wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx + ``` + +1. Update the repository information: + + ```shell + sudo apt update + ``` + +1. Install the **nginx-plus** package. Any older NGINX Plus package is automatically replaced. + + ```shell + sudo apt install -y nginx-plus + ``` + +1. {{< include "nginx-plus/install/check-nginx-binary-version.md" >}} + +1. {{< include "nginx-plus/install/configure-usage-reporting.md" >}} + +1. {{< include "nginx-plus/install/install-nginx-agent-for-nim.md" >}} + +## Install NGINX Plus on FreeBSD {#install_freebsd} + +1. {{< include "nginx-plus/install/check-tech-specs.md" >}} + +1. {{< include "nginx-plus/install/back-up-config-and-logs.md" >}} + +1. {{< include "licensing-and-reporting/download-jwt-crt-from-myf5.md" >}} + +1. Create the **/usr/local/etc/nginx/** directory for the JWT license file: + + ```shell + sudo mkdir -p /usr/local/etc/nginx + cd /usr/local/etc/nginx + ``` + +1. Rename the downloaded JWT file to **license.jwt** and copy it to the **/usr/local/etc/nginx** directory: + + ```shell + sudo cp license.jwt /usr/local/etc/nginx + ``` + +1. Install the prerequisite **ca_root_nss** package: + + ```shell + sudo pkg update + sudo pkg install ca_root_nss + ``` + +1. {{< include "nginx-plus/install/create-dir-for-crt-key.md" >}} + +1. {{< include "nginx-plus/install/copy-crt-and-key.md" >}} + +1. Copy the [nginx-plus.conf](https://cs.nginx.com/static/files/nginx-plus.conf) file to the **/etc/pkg/** directory: + + ```shell + sudo fetch -o /etc/pkg/nginx-plus.conf http://cs.nginx.com/static/files/nginx-plus.conf + ``` + +1. Add the following lines to the **/usr/local/etc/pkg.conf** file: + + ```none + PKG_ENV: { SSL_NO_VERIFY_PEER: "1", + SSL_CLIENT_CERT_FILE: "/etc/ssl/nginx/nginx-repo.crt", + SSL_CLIENT_KEY_FILE: "/etc/ssl/nginx/nginx-repo.key" } + ``` + +1. 
Install the **nginx-plus** package. Any older NGINX Plus package is automatically replaced. Back up your NGINX Plus configuration and log files if you have an older NGINX Plus package installed. For more information, see [Upgrading NGINX Plus](#upgrade). + + ```shell + sudo pkg install nginx-plus + ``` + +1. {{< include "nginx-plus/install/check-nginx-binary-version.md" >}} + +1. {{< include "nginx-plus/install/configure-usage-reporting.md" >}} + +1. {{< include "nginx-plus/install/install-nginx-agent-for-nim.md" >}} + +## Install NGINX Plus on SUSE Linux Enterprise Server {#install_suse} + +1. {{< include "nginx-plus/install/check-tech-specs.md" >}} + +1. {{< include "nginx-plus/install/back-up-config-and-logs.md" >}} + +1. {{< include "licensing-and-reporting/download-jwt-crt-from-myf5.md" >}} + +1. {{< include "nginx-plus/install/create-dir-for-jwt.md" >}} + +1. {{< include "nginx-plus/install/copy-jwt-to-etc-nginx-dir.md" >}} + +1. {{< include "nginx-plus/install/create-dir-for-crt-key.md" >}} + +1. {{< include "nginx-plus/install/copy-crt-and-key.md" >}} + +1. Create a file bundle of the certificate and key: + + ```shell + cat /etc/ssl/nginx/nginx-repo.crt /etc/ssl/nginx/nginx-repo.key > /etc/ssl/nginx/nginx-repo-bundle.crt + ``` + +1. Install the required **ca-certificates** dependency: + + ```shell + zypper refresh + zypper install ca-certificates + ``` + +1. Add the **nginx-plus** repo. + + **For SLES 12**: + + ```shell + zypper addrepo -G -t yum -c \ + "https://pkgs.nginx.com/plus/sles/12?ssl_clientcert=/etc/ssl/nginx/nginx-repo-bundle.crt&ssl_verify=peer" \ + nginx-plus + ``` + + **For SLES 15**: + + ```shell + zypper addrepo -G -t yum -c \ + "https://pkgs.nginx.com/plus/sles/15?ssl_clientcert=/etc/ssl/nginx/nginx-repo-bundle.crt&ssl_verify=peer" \ + nginx-plus + ``` + +1. Install the **nginx-plus** package. Any older NGINX Plus package is automatically replaced. + + ```shell + zypper install nginx-plus + ``` + +1. 
{{< include "nginx-plus/install/check-nginx-binary-version.md" >}} + +1. {{< include "nginx-plus/install/configure-usage-reporting.md" >}} + +1. {{< include "nginx-plus/install/install-nginx-agent-for-nim.md" >}} + +## Install NGINX Plus on Alpine Linux {#install_alpine} + +1. {{< include "nginx-plus/install/check-tech-specs.md" >}} + +1. {{< include "nginx-plus/install/back-up-config-and-logs.md" >}} + +1. {{< include "licensing-and-reporting/download-jwt-crt-from-myf5.md" >}} + +1. {{< include "nginx-plus/install/create-dir-for-jwt.md" >}} + +1. {{< include "nginx-plus/install/copy-jwt-to-etc-nginx-dir.md" >}} + +1. Upload **nginx-repo.key** to **/etc/apk/cert.key** and **nginx-repo.crt** to **/etc/apk/cert.pem**. Ensure these files contain only the specific key and certificate — Alpine Linux doesn't support mixing client certificates for multiple repositories. + +1. Put the NGINX signing public key in the **/etc/apk/keys** directory: + + ```shell + sudo wget -O /etc/apk/keys/nginx_signing.rsa.pub https://cs.nginx.com/static/keys/nginx_signing.rsa.pub + ``` + +1. Add the NGINX repository to the **/etc/apk/repositories** file: + + ```shell + printf "https://pkgs.nginx.com/plus/alpine/v`egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release`/main\n" \ + | sudo tee -a /etc/apk/repositories + ``` + +1. Remove all community-supported NGINX packages. Note that this will also remove all NGINX modules: + + ```shell + sudo apk del -r nginx + ``` + +1. Install the NGINX Plus package: + + ```shell + sudo apk add nginx-plus + ``` + +1. {{< include "nginx-plus/install/check-nginx-binary-version.md" >}} + +1. {{< include "nginx-plus/install/configure-usage-reporting.md" >}} + +1. 
{{< include "nginx-plus/install/install-nginx-agent-for-nim.md" >}} + + +## Install Dynamically Loadable Modules {#install_modules} + +NGINX Plus functionality can be extended with dynamically loadable modules that are not included in the prebuilt packages: + +- NGINX-authored dynamic modules – Modules developed and maintained by F5 NGINX. These modules can be installed directly from the official repository: + - [GeoIP]({{< relref "nginx/admin-guide/dynamic-modules/geoip.md" >}}) + - [Image-Filter]({{< relref "nginx/admin-guide/dynamic-modules/image-filter.md" >}}) + - [njs Scripting Language]({{< relref "nginx/admin-guide/dynamic-modules/nginscript.md" >}}) + - [OTel]({{< relref "nginx/admin-guide/dynamic-modules/opentelemetry.md" >}}) + - [Perl]({{< relref "nginx/admin-guide/dynamic-modules/perl.md" >}}) + - [XSLT]({{< relref "nginx/admin-guide/dynamic-modules/xslt.md" >}}) + +- NGINX-certified community dynamic modules – Popular third‑party modules tested and distributed by F5 NGINX, with installation and basic configuration support provided. 
These modules are also available directly from the official repository: + - [Brotli]({{< relref "nginx/admin-guide/dynamic-modules/brotli.md" >}}) + - [Encrypted-Session]({{< relref "nginx/admin-guide/dynamic-modules/encrypted-session.md" >}}) + - [FIPS Status Check]({{< relref "nginx/admin-guide/dynamic-modules/fips.md" >}}) + - [GeoIP2]({{< relref "nginx/admin-guide/dynamic-modules/geoip2.md" >}}) + - [Headers-More]({{< relref "nginx/admin-guide/dynamic-modules/headers-more.md" >}}) + - [HTTP Substitutions Filter]({{< relref "nginx/admin-guide/dynamic-modules/http-substitutions-filter.md" >}}) + - [Lua]({{< relref "nginx/admin-guide/dynamic-modules/lua.md" >}}) + - [NGINX Developer Kit]({{< relref "nginx/admin-guide/dynamic-modules/ndk.md" >}}) + - [OpenTelemetry]({{< relref "nginx/admin-guide/dynamic-modules/opentelemetry.md" >}}) + - [OpenTracing]({{< relref "nginx/admin-guide/dynamic-modules/opentracing.md" >}}) + - [Phusion Passenger]({{< relref "nginx/admin-guide/dynamic-modules/passenger-open-source.md" >}}) + - [Prometheus-njs]({{< relref "nginx/admin-guide/dynamic-modules/prometheus-njs.md" >}}) + - [RTMP]({{< relref "nginx/admin-guide/dynamic-modules/rtmp.md" >}}) + - [Set-Misc]({{< relref "nginx/admin-guide/dynamic-modules/set-misc.md" >}}) + - [SPNEGO]({{< relref "nginx/admin-guide/dynamic-modules/spnego.md" >}}) + +- Community dynamic modules – Modules written and distributed by third‑party members of the NGINX community. To use these modules, download the source code from the author's repository and [compile it against the NGINX Open Source version](#install_modules_oss) corresponding to your NGINX Plus version. These modules are not available in the official repository but can be found in different community resources such as [awesome-nginx GitHub project](https://github.com/agile6v/awesome-nginx#third-party-modules). 
+ +### Install Dynamic Modules from Official Repository {#install_modules_plus} + +NGINX‑authored and NGINX‑certified dynamic modules can be installed directly from the modules repository. To install the modules: + +- For RHEL, Amazon Linux 2, CentOS, Oracle Linux: + + ```shell + yum update + yum install + ``` + +- For Amazon Linux 2023, AlmaLinux and Rocky Linux: + + ```shell + dnf update + dnf install + ``` + +- For Debian and Ubuntu: + + ```shell + apt update + apt install + ``` + +- For FreeBSD: + + ```shell + pkg update + pkg install + ``` + +- For SLES: + + ```shell + zypper refresh + zypper install + ``` + +- For Alpine Linux: + + ```shell + sudo apk update + sudo apk add + ``` + +See [NGINX Plus Dynamic Modules]({{< relref "nginx/admin-guide/dynamic-modules/dynamic-modules.md" >}}) for detailed installation instructions for each dynamic module. + +Note that some modules may not be available for certain OS versions because of operating system limitations. For details and descriptions of the modules, see the [NGINX Plus Technical Specifications]({{< relref "nginx/technical-specs.md" >}}). + +After installing the module, you need to enable it in the NGINX Plus configuration file. For more information, see [Enabling Dynamic Modules](#enable_dynamic). + +### Install NGINX Community Modules {#install_modules_oss} + +For a community dynamic module to work with NGINX Plus, it must be compiled alongside the corresponding version of NGINX Open Source. + +1. Prepare the build environment. + + We strongly recommend compiling dynamic modules on a separate system, referred to as the “build environment”. This approach minimizes the risk and complexity for the system where NGINX Plus will be upgraded, referred to as the “production environment”. 
The build environment should meet the following requirements: + + - The same operating system as the production environment + - The same NGINX version as the production environment + - Compiler and `make` utilities + - [PCRE](http://pcre.org/) library (development files) + - [Zlib](http://www.zlib.net/) compression libraries (development files) + + To verify that the required prerequisites are installed in your build environment, run the following commands: + + - For Debian and Ubuntu: + + ```shell + sudo apt update + sudo apt install gcc make libpcre3-dev zlib1g-dev + ``` + + - For CentOS, Oracle Linux, and RHEL: + + ```shell + sudo yum update + sudo yum install gcc make pcre-devel zlib-devel + ``` + +1. Obtain NGINX Open Source. + + - Identify the NGINX Open Source version that corresponds to your version of NGINX Plus. See [NGINX Plus Releases]({{< relref "nginx/releases.md" >}}). + + - Download the sources for the appropriate NGINX Open Source mainline version, in this case 1.27.2: + + ```shell + wget -qO - https://nginx.org/download/nginx-1.27.2.tar.gz | tar zxfv - + ``` + +1. Obtain the source for the dynamic module. + + The source code for the dynamic module can be placed in any directory in the build environment. As an example, here we're copying the [NGINX “Hello World” module](https://github.com/perusio/nginx-hello-world-module.git/) from GitHub: + + ```shell + git clone https://github.com/perusio/nginx-hello-world-module.git + ``` + +1. Compile the dynamic module. + + First establish binary compatibility by running the `configure` script with the `‑‑with‑compat` option. Then compile the module with `make modules`. + + ```shell + cd nginx-1.27.2/ + ./configure --with-compat --add-dynamic-module=../ + make modules + ``` + + The **.so** file generated by the build process is placed in the **objs** subdirectory + + ```shell + ls objs/*.so + objs/ngx_http_hello_world.so + ``` + +1. 
Make a copy of the module file and include the NGINX Open Source version in the filename. This makes it simpler to manage multiple versions of a dynamic module in the production environment. + + ```shell + cp objs/ngx_http_hello_world.so ./ngx_http_hello_world_1.27.2.so + ``` + +### Enabling Dynamic Modules {#enable_dynamic} + +Dynamic modules are located in the **/etc/nginx/modules** directory, which is created automatically at NGINX Plus installation. + +To enable a dynamic module: + +1. In the `main` (top-level) context in **/etc/nginx/nginx.conf**, add a [load_module](https://nginx.org/en/docs/ngx_core_module.html#load_module) directive for each dynamically loadable module you installed. + + ```nginx + load_module modules/.so; + ``` + +1. Check the new configuration for syntactic validity and reload NGINX Plus. + + ```shell + nginx -t && nginx -s reload + ``` + +## NGINX Plus Unprivileged Installation {#unpriv_install} + +In some environments, access to the root account is restricted for security reasons. On Linux systems, this limitation prevents the use of package managers to install NGINX Plus without root privileges. + +As a workaround, in such environments NGINX Plus can be installed with a special script that modifies NGINX Plus configuration file to allow it to run from a non-root user. This script performs the following actions: + +- Downloads the NGINX Plus packages + +- Extracts the content of the archives into a user-defined directory of the packages + +- Updates the paths in the NGINX configuration file to use relative paths in the specified directory + +- Makes a backup copy of the configuration directory + +- Has an option to upgrade an existing unprivileged installation of NGINX Plus + +Comparing to a standard installation of NGINX Plus, an unprivileged installation has certain limitations and restrictions: + +- Root privileges are still required in order to listen on ports below `1024`. 
 + +- The script is not intended to replace your operating system's package manager and does not allow for the installation of any software other than NGINX Plus and its modules. Modifications to the script for other installations are not covered by the support program. + +- NGINX Plus will not start automatically, so you must add a custom `init` script or `systemd` unit file for each unprivileged installation on the host. + +- All dependencies and libraries required by the NGINX Plus binary and its modules are not installed automatically and should be checked and installed manually. + +The script can be run on the following operating systems: + +- RedHat, CentOS +- Amazon Linux 2 +- Amazon Linux 2023 +- Debian, Ubuntu +- Alpine Linux +- AlmaLinux, Rocky Linux + +Before starting the unprivileged installation, make sure you have all the prerequisites listed in the [Prerequisites](#prereq) section (excluding `root` privileges). For RPM-based distributions, verify that you have [`rpm2cpio`](https://man7.org/linux/man-pages/man8/rpm2cpio.8.html) installed. + +To perform an unprivileged installation of NGINX Plus: + +1. Obtain the script: + + ```shell + wget https://raw.githubusercontent.com/nginxinc/nginx-plus-install-tools/main/ngxunprivinst.sh + ``` + +1. Make the script executable: + + ```shell + chmod +x ngxunprivinst.sh + ``` + +1. Download NGINX Plus and its module packages for your operating system. The `<cert_file>`, `<key_file>` and `<jwt_file>` are your NGINX Plus certificate, private key, and JWT license obtained from [MyF5 Customer Portal](https://account.f5.com/myf5/): + + ```shell + ./ngxunprivinst.sh fetch -c <cert_file> -k <key_file> -j <jwt_file> + ``` + + {{< note >}} Starting from [NGINX Plus Release 33]({{< ref "nginx/releases.md#r33" >}}), a JWT license file (`<jwt_file>`) is required for each NGINX Plus instance. For more information, see [About Subscription Licenses]({{< ref "/solutions/about-subscription-licenses.md">}}). 
{{< /note >}} + + If you need to install a particular version of NGINX Plus: + + - first, list all available NGINX Plus versions from the repository: + + ```shell + ./ngxunprivinst.sh list -c <cert_file> -k <key_file> + ``` + + - then specify a particular NGINX Plus version with the `-v` option: + + ```shell + ./ngxunprivinst.sh fetch -c <cert_file> -k <key_file> -v <version> + ``` + +1. Extract the downloaded packages to the provided NGINX Plus prefix `<path>`. An optional `-y` option will overwrite an existing installation (if any). Starting from version R33, the `-j` option that specifies the `<jwt_file>` is mandatory: + + ```shell + ./ngxunprivinst.sh install [-y] -p <path> -j <jwt_file> + ``` + +1. When the installation procedure is finished, run NGINX Plus. The `-p` parameter sets a path to the directory that keeps nginx files. The `-c` parameter sets a path to an alternative NGINX configuration file. Please note NGINX Plus must listen on ports above `1024`: + + ```shell + /usr/sbin/nginx -p /etc/nginx -c /etc/nginx/conf.d + ``` + +With this script, you can also upgrade an existing unprivileged installation of NGINX Plus in the provided `<path>`. An optional `-y` option performs a forced upgrade without any confirmation: + +```shell +./ngxunprivinst.sh upgrade [-y] -p <path> +``` + +## NGINX Plus Offline Installation {#offline_install} + +This section explains how to install NGINX Plus and its [dynamic modules]({{< relref "/nginx/admin-guide/dynamic-modules/dynamic-modules.md" >}}) on a server with limited or no Internet access. + +To install NGINX Plus offline, you will need a machine connected to the Internet to get the NGINX Plus package, JWT license, SSL certificate and key. Then you can transfer these files to the target server for offline installation. + +### Step 1: Obtaining files on the machine connected to the Internet {#offline-obtain-files} + +1. {{< include "licensing-and-reporting/download-jwt-crt-from-myf5.md" >}} + +1. Transfer the files to the target server that doesn't have online access and where NGINX Plus will be installed. 
+ +### Step 2: Installing NGINX Plus on a server without Internet connectivity + +1. {{< include "nginx-plus/install/back-up-config-and-logs.md" >}} + +1. Make sure you’ve downloaded the SSL certificate, private key, and the JWT file required for your NGINX Plus subscription. You can find these files in the MyF5 Customer Portal. For details on how to obtain these files, see [Step 1: Obtaining files on the machine connected to the Internet](#offline-obtain-files). + +1. {{< include "nginx-plus/install/create-dir-for-jwt.md" >}} + +1. {{< include "nginx-plus/install/copy-jwt-to-etc-nginx-dir.md" >}} + +1. {{< include "nginx-plus/install/create-dir-for-crt-key.md" >}} + +1. {{< include "nginx-plus/install/copy-crt-and-key.md" >}} + +1. Install the NGINX Plus package or a dynamic module. Any older NGINX Plus package is automatically replaced. + + - **For RHEL, Amazon Linux, CentOS, Oracle Linux, AlmaLinux and Rocky Linux**: + + ```shell + sudo rpm -ihv + ``` + + - **For Debian, Ubuntu**: + + ```shell + sudo dpkg -i + ``` + + - **For Alpine**: + + ```shell + apk add + ``` + + - **For SLES**: + + ```shell + rpm -ivh + ``` + +1. {{< include "nginx-plus/install/check-nginx-binary-version.md" >}} + +1. Install NGINX Instance Manager 2.18 or later in your local environment to enable usage reporting, which is mandatory since R33. For more information, see [Disconnected environments](https://docs.nginx.com/nginx-instance-manager/disconnected/) and [About Subscription Licenses]({{< ref "/solutions/about-subscription-licenses.md">}}). + +1. Configure usage reporting of the NGINX Plus instance to NGINX Instance Manager which is mandatory starting from R33. 
+ + In the `nginx.conf` configuration file, specify the following directives: + + - the [`mgmt {}`](https://nginx.org/en/docs/ngx_mgmt_module.html#mgmt) block that handles NGINX Plus licensing and usage reporting configuration, + + - the [`usage_report`](https://nginx.org/en/docs/ngx_mgmt_module.html#usage_report) directive that sets the domain name or IP address of NGINX Instance Manager, + + - the [`enforce_initial_report`](https://nginx.org/en/docs/ngx_mgmt_module.html#usage_report) directive that enables the 180-day grace period for sending the initial usage report. The initial usage report must be received by F5 licensing endpoint during the grace period, otherwise traffic processing will be stopped: + + ```nginx + mgmt { + usage_report endpoint=NIM_FQDN; + enforce_initial_report off; + } + ``` + +1. {{< include "nginx-plus/install/nim-disconnected-report-usage.md" >}} + +1. Upload the usage acknowledgement to NGINX Instance Manager. For more information, see [Report usage to F5 in a disconnected environment](https://docs.nginx.com/nginx-instance-manager/disconnected/report-usage-disconnected-deployment/#submit-usage-report). + +## Upgrade NGINX Plus {#upgrade} + +{{< note >}} Starting from [Release 24]({{< ref "nginx/releases.md#r24" >}}) (R24), NGINX Plus repositories have been separated into individual repositories based on operating system distribution and license subscription. Before upgrading from previous NGINX Plus versions, you must first reconfigure your repositories to point to the correct location. To reconfigure your repository, follow the installation instructions above for your operating system. {{< /note >}} + +To upgrade your NGINX Plus installation to the newest version: + +1. If your system has previous NGINX or NGINX Plus packages on it, back up the configuration and log files. 
+ + - **For Linux distributions**: + + ```shell + sudo cp -a /etc/nginx /etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + + - **For FreeBSD**: + + ```shell + sudo cp -a /usr/local/etc/nginx /usr/local/etc/nginx-plus-backup + sudo cp -a /var/log/nginx /var/log/nginx-plus-backup + ``` + +1. Get the JWT file associated with your NGINX Plus subscription from the MyF5 Customer Portal: + + {{< include "licensing-and-reporting/download-jwt-from-myf5.md" >}} + + {{< note >}} Starting from [NGINX Plus Release 33]({{< ref "nginx/releases.md#r33" >}}), a JWT file is required for each NGINX Plus instance. For more information, see [About Subscription Licenses]({{< ref "/solutions/about-subscription-licenses.md">}}). {{< /note >}} + +1. Create the **/etc/nginx/** directory for Linux or the **/usr/local/etc/nginx** directory for FreeBSD: + + - **For Linux**: + + ```shell + sudo mkdir -p /etc/nginx + ``` + + - **For FreeBSD**: + + ```shell + sudo mkdir -p /usr/local/etc/nginx + ``` + + {{}}{{< include "licensing-and-reporting/custom-paths-jwt.md" >}}{{}} + +1. After downloading the JWT file, copy it to the **/etc/nginx/** directory for Linux, or to the **/usr/local/etc/nginx** directory for FreeBSD, and make sure it's named **license.jwt**: + + - **For Linux**: + + ```shell + sudo cp .jwt /etc/nginx/license.jwt + ``` + + - **For FreeBSD**: + + ```shell + sudo cp .jwt /usr/local/etc/nginx/license.jwt + ``` + +2. Upgrade to the new NGINX Plus package. + + - **For RHEL, Amazon Linux, CentOS, Oracle Linux, AlmaLinux and Rocky Linux**: + + ```shell + sudo yum upgrade nginx-plus + ``` + + - **For Debian and Ubuntu**: + + ```shell + sudo apt update + sudo apt install nginx-plus + ``` + + - **For FreeBSD**: + + ```shell + sudo pkg upgrade nginx-plus + ``` + +3. Configure NGINX Plus usage reporting which is mandatory starting from R33. By default, no configuration is required. 
However, configuration is required in specific scenarios, such as NGINX Plus is installed in an offline environment or if the JWT license file is located in a non-default directory. + + For offline environments, usage reporting should be configured for NGINX Instance Manager 2.18 or later. In the `nginx.conf` configuration file, specify the following directives: + + - the [`mgmt`](https://nginx.org/en/docs/ngx_mgmt_module.html#mgmt) context handles NGINX Plus licensing and usage reporting configuration, + + - the [`usage_report`](https://nginx.org/en/docs/ngx_mgmt_module.html#usage_report) directive specifies the domain name or IP address of the NGINX Instance Manager, + + - the [`enforce_initial_report`](https://nginx.org/en/docs/ngx_mgmt_module.html#usage_report) directive enables a 180-day grace period for sending the initial usage report. The initial usage report must be received by F5 licensing endpoint within this grace period. If the report is not received in time, traffic processing will be stopped: + + ```nginx + mgmt { + usage_report endpoint=NIM_FQDN; + enforce_initial_report off; + } + ``` + + {{< include "nginx-plus/install/nim-disconnected-report-usage.md" >}} + + If the JWT license file is located in a directory other than **/etc/nginx/** for Linux or **usr/local/etc/nginx/** for FreeBSD, you must specify its name and path in the [`license_token`](https://nginx.org/en/docs/ngx_mgmt_module.html#license_token) directive: + + ```nginx + mgmt { + license_token custom/file/path/license.jwt; + } + ``` + + For more information, see [About Subscription Licenses]({{< ref "/solutions/about-subscription-licenses.md">}}). + +4. To verify that the new NGINX Plus version is upgraded, run: + + ```shell + nginx -v + ``` + + The output of the command: + + ```shell + nginx version: nginx/1.27.2 (nginx-plus-r33) + ``` + +## Upgrade NGINX Plus Modules {#upgrade_modules} + +The upgrade procedure depends on how the module was supplied and installed. 
+ +- NGINX‑authored and NGINX‑certified community dynamic modules are updated automatically together with NGINX Plus. + + {{< note >}} For FreeBSD, each NGINX‑authored and NGINX‑certified module must be updated separately using FreeBSD package management tool. {{< /note >}} + +- Community dynamic modules must be recompiled against the corresponding NGINX Open Source version. See [Installing NGINX Community Modules](#install_modules_oss). + +## Explore Related Topics + +### Install NGINX App Protect + +To install NGINX App Protect, follow the steps in the [NGINX App Protect installation guide]({{< relref "nap-waf/v5/admin-guide/install.md" >}}). diff --git a/content/nginx/admin-guide/load-balancer/_index.md b/content/nginx/admin-guide/load-balancer/_index.md new file mode 100644 index 000000000..da2a83926 --- /dev/null +++ b/content/nginx/admin-guide/load-balancer/_index.md @@ -0,0 +1,9 @@ +--- +description: Documentation explaining how to configure NGINX and F5 NGINX Plus as a load + balancer for HTTP, TCP, UDP, and other protocols. +menu: + docs: + parent: NGINX Plus +title: Load Balancer +weight: 300 +--- diff --git a/content/nginx/admin-guide/load-balancer/dynamic-configuration-api.md b/content/nginx/admin-guide/load-balancer/dynamic-configuration-api.md new file mode 100644 index 000000000..bfebfb083 --- /dev/null +++ b/content/nginx/admin-guide/load-balancer/dynamic-configuration-api.md @@ -0,0 +1,210 @@ +--- +description: Dynamically reconfigure the servers in an F5 NGINX Plus upstream group using + the NGINX Plus API, without reloading configuration or restarting processes. +docs: DOCS-415 +doctypes: +- task +title: Dynamic Configuration of Upstreams with the NGINX Plus API +toc: true +weight: 700 +--- + + +## Overview + +With F5 NGINX Plus, configuration of upstream servers in a server group can be modified on-the-fly without reloading the servers and NGINX configuration. 
This is useful for: + +- autoscaling, when you need to add more servers +- maintenance, when you need to remove a server, specify a backup server, or take a server down temporarily +- quick setup, when you need to change upstream server settings such as server weight, active connections, slow start, failure timeouts. +- monitoring, when you get the state of the server or server group with one command + +These changes are made with the NGINX Plus REST API interface with API commands. + +> **Note:** In NGINX Plus Release 12 (R12) and earlier, dynamic configuration was performed with the `upstream_conf` handler. That API (and the extended `status` API) are now deprecated in favor of the NGINX Plus API. + + + +## Prerequisites + +Prior to using the dynamic configuration feature, make sure that you have the following environment: + +1. NGINX Plus R13 or later +2. You have created upstream groups of application or web servers, as described in [HTTP Load Balancing]({{< relref "http-load-balancer.md" >}}) and [TCP/UDP Load Balancing]({{< relref "tcp-udp-load-balancer.md" >}}) +3. Upstream server groups reside in the shared memory zone, as described in [Sharing Data with Multiple Worker Processes]({{< relref "http-load-balancer.md" >}}) + + + +## Enabling Dynamic Configuration + +1. Create an upstream server group as described in Proxying Traffic to a Group of Servers. + + ```nginx + http { + # ... + upstream appservers { + server appserv1.example.com weight=5; + server appserv2.example.com:8080 fail_timeout=5s; + server reserve1.example.com:8080 backup; + server reserve2.example.com:8080 backup; + } + + server { + # Location that proxies requests to the upstream group + location / { + proxy_pass http://appservers; + health_check; + } + } + } + ``` + +1. Include the [`zone`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) directive in the `upstream` block. The `zone` directive configures a zone in the shared memory and sets the zone name and size. 
The configuration of the server group is kept in this zone, so all worker processes use the same configuration: + + ```nginx + http { + # ... + upstream appservers { + zone appservers 64k; + + server appserv1.example.com weight=5; + server appserv2.example.com:8080 fail_timeout=5s; + server reserve1.example.com:8080 backup; + server reserve2.example.com:8080 backup; + } + } + ``` + +2. Enable the NGINX API in read‑write mode by including the [`api`](https://nginx.org/en/docs/http/ngx_http_api_module.html#api) directive in a dedicated [`location`](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) block in a [`server`](https://nginx.org/en/docs/http/ngx_http_core_module.html#server) block. + + We strongly recommend restricting access to the location and to `PATCH`/`POST`/`DELETE` methods. This example uses the [`allow`](https://nginx.org/en/docs/http/ngx_http_access_module.html#allow) and [`deny`](https://nginx.org/en/docs/http/ngx_http_access_module.html#deny) directives to grant access from the `localhost` address (`127.0.0.1`) and deny access from all other addresses. It also restricts access to `PATCH`/`POST`/`DELETE` methods with [HTTP basic authentication](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html): + + ```nginx + server { + location /api { + limit_except GET { + auth_basic "NGINX Plus API"; + auth_basic_user_file /path/to/passwd/file; + } + api write=on; + allow 127.0.0.1; + deny all; + } + } + ``` + +Complete example: + +```nginx +http { + # ... 
      + + # Configuration of the server group + upstream appservers { + zone appservers 64k; + + server appserv1.example.com weight=5; + server appserv2.example.com:8080 fail_timeout=5s; + server reserve1.example.com:8080 backup; + server reserve2.example.com:8080 backup; + } + server { + # Location that proxies requests to the upstream group + location / { + proxy_pass http://appservers; + health_check; + } + + # Location for dynamic configuration requests + location /api { + limit_except GET { + auth_basic "NGINX Plus API"; + auth_basic_user_file /path/to/passwd/file; + } + api write=on; + allow 127.0.0.1; + deny all; + } + } +} +``` + + +## Using the API for Dynamic Configuration + +The NGINX Plus REST API supports the following HTTP methods: + +- `GET` – Display information about an upstream group or individual server in it +- `POST` – Add a server to the upstream group +- `PATCH` – Modify the parameters of a particular server +- `DELETE` – Delete a server from the upstream group + +The endpoints and methods for the NGINX Plus API are described in the [NGINX Modules Reference](https://nginx.org/en/docs/http/ngx_http_api_module.html). In addition, the API has a built‑in Swagger specification that can be used to explore the API and understand the capabilities of each resource. The Swagger documentation can be accessed at `http://_NGINX-host_/swagger-ui/`. + +To change the configuration of an upstream group dynamically, send an HTTP request with the appropriate API method. The following examples use the `curl` command, but any mechanism for making HTTP requests is supported. All request bodies and responses are in JSON format. 
+
+The URI specifies the following information in this order:
+
+- The hostname or IP address of the node that handles the request (in the following examples, `127.0.0.1`)
+- The location where the `api` directive appears (`api`)
+- The API version (`9`)
+- The name of the upstream group, complete with its place in the NGINX Plus configuration hierarchy represented as a slash‑separated path (`http/upstreams/appservers`)
+
+For example, to add a new server to the `appservers` upstream group, send the following `curl` command:
+
+```shell
+curl -X POST -d '{ \
+    "server": "10.0.0.1:8089", \
+    "weight": 4, \
+    "max_conns": 0, \
+    "max_fails": 0, \
+    "fail_timeout": "10s", \
+    "slow_start": "10s", \
+    "backup": true, \
+    "down": true \
+  }' -s 'http://127.0.0.1/api/9/http/upstreams/appservers/servers'
+```
+
+To remove a server from the upstream group:
+
+```shell
+curl -X DELETE -s 'http://127.0.0.1/api/9/http/upstreams/appservers/servers/0'
+```
+
+To set the `down` parameter for the first server in the group (with ID `0`):
+
+```shell
+curl -X PATCH -d '{ "down": true }' -s 'http://127.0.0.1/api/9/http/upstreams/appservers/servers/0'
+```
+
+
+
+### Interactive Example
+
+You can explore the Swagger interface to the NGINX Plus API in read‑only mode at [https://demo.nginx.com/swagger-ui/](https://demo.nginx.com/swagger-ui/).
+
+
+
+## Configuring Persistence of Dynamic Configuration
+
+With the basic configuration in
+[Enabling the API](#api_setup), changes made with the API are stored only in the shared memory zone. The changes are discarded when the NGINX Plus configuration file is reloaded.
+
+To make the changes persist across configuration reloads, move the list of upstream servers from the `upstream` block to a special file for storing server state, defined with the [`state`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#state) directive.
The recommended path for Linux distributions is `/var/lib/nginx/state/`, and for FreeBSD distributions is `/var/db/nginx/state/`. + +```nginx +http { + # ... + upstream appservers { + zone appservers 64k; + state /var/lib/nginx/state/appservers.conf; + + # All servers are defined in the state file + # server appserv1.example.com weight=5; + # server appserv2.example.com:8080 fail_timeout=5s; + # server reserve1.example.com:8080 backup; + # server reserve2.example.com:8080 backup; + } +} +``` + +Keep in mind that the state file can be modified only with configuration commands from the [API](https://nginx.org/en/docs/http/ngx_http_api_module.html) interface; do not modify the file directly (for example, using a text editor). diff --git a/content/nginx/admin-guide/load-balancer/grpc-health-check.md b/content/nginx/admin-guide/load-balancer/grpc-health-check.md new file mode 100644 index 000000000..8fc5032e5 --- /dev/null +++ b/content/nginx/admin-guide/load-balancer/grpc-health-check.md @@ -0,0 +1,85 @@ +--- +description: Monitor the health of gRPC servers in an upstream group by sending periodic + health checks. +docs: DOCS-416 +doctypes: +- task +title: gRPC Health Checks +toc: true +weight: 600 +--- + + +## Introduction +F5 NGINX Plus can monitor the health of upstream servers by making active health checks. NGINX Plus R23 supports the [gRPC health checking protocol](https://github.com/grpc/grpc/blob/master/doc/health-checking.md#grpc-health-checking-protocol) so that upstream gRPC services can be tested for their ability to handle new requests. This is particularly important in dynamic and containerized environments. When adding a new instance of a gRPC service, it is important that requests are sent to the fully operating service. + + +## Prerequisites + +- You have configured an upstream group of servers that handles gRPC network traffic and specified a shared memory zone that keeps the state of these servers, for example: + + ```nginx + stream { + #... 
+
+        upstream grpc_backend {
+            zone grpc_backend 64k;
+            server 10.0.0.1:50051;
+            server 10.0.0.2:50051;
+        }
+        #...
+    }
+    ```
+
+- You have configured a server that routes gRPC requests to the upstream server group:
+
+    ```nginx
+    location /grpc {
+        grpc_pass grpc://grpc_backend;
+    }
+    ```
+
+
+## gRPC Servers that accept health checking protocol
+
+If your gRPC services support the gRPC health checking protocol, specify the [`type=grpc`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check_grpc) parameter of the `health_check` directive:
+
+```nginx
+location / {
+    grpc_pass grpc://grpc_backend;
+    health_check mandatory type=grpc;
+}
+```
+
+In this example, according to the health checking protocol, the `Check` method of the `Health` service will be invoked, and gRPC servers that respond with `SERVING` are considered healthy.
+
+The `mandatory` parameter ensures that the health check must pass before traffic is sent to an instance, for example, when it is introduced or reloaded. Note that the `type=grpc` must be specified after all other `health_check` parameters.
+
+If there are several gRPC services exposed on each upstream server then the most significant service can be monitored by specifying the service name with the `grpc_service` parameter:
+
+```nginx
+location / {
+    grpc_pass grpc://grpc_backend;
+    health_check mandatory type=grpc grpc_service=MyStatus;
+}
+```
+
+
+## gRPC Servers that do not accept health checking protocol
+
+If your gRPC services do not implement the gRPC health checking protocol, it is still possible to perform a health check by sending the `Check` method and expecting a particular [`status code`](https://github.com/grpc/grpc/blob/master/doc/statuscodes.md#status-codes-and-their-use-in-grpc). This will test that the upstream server is responding to gRPC requests.
Specify the gRPC error response code with the `grpc_status` parameter: + +```nginx +location / { + grpc_pass grpc://grpc_backend; + health_check type=grpc grpc_status=12; # 12=unimplemented +} +``` + +In this case the response with the gRPC status of `12` / `UNIMPLEMENTED` will be considered healthy. + +Note that the `type=grpc` parameter is not compatible with `uri` or `match` parameters of the `health_check` directive. The `type=grpc` parameter must be specified after all other directive parameters: `grpc_service` and `grpc_status` must follow `type=grpc`. + + +## See also + +- [Deploying NGINX Plus as an API Gateway, Part 3: Publishing gRPC Services](https://www.nginx.com/blog/deploying-nginx-plus-as-an-api-gateway-part-3-publishing-grpc-services/) diff --git a/content/nginx/admin-guide/load-balancer/http-health-check.md b/content/nginx/admin-guide/load-balancer/http-health-check.md new file mode 100644 index 000000000..ac44412c2 --- /dev/null +++ b/content/nginx/admin-guide/load-balancer/http-health-check.md @@ -0,0 +1,234 @@ +--- +description: Monitor the health of HTTP servers in an upstream group by sending periodic + health checks, including customizable active health checks in F5 NGINX Plus. +docs: DOCS-417 +doctypes: +- task +title: HTTP Health Checks +toc: true +weight: 300 +--- + + +## Introduction + +NGINX and F5 NGINX Plus can continually test your upstream servers, avoid the servers that have failed, and gracefully add the recovered servers into the load‑balanced group. 
+ + + +## Prerequisites + +- For passive health checks, [NGINX Open Source](https://nginx.org/en/) or [NGINX Plus](https://www.nginx.com/products/nginx) +- For active health checks and the [live activity monitoring dashboard]({{< relref "../monitoring/live-activity-monitoring.md" >}}), NGINX Plus +- A load‑balanced group of [HTTP upstream servers]({{< relref "http-load-balancer.md" >}}) + + + +## Passive Health Checks + +For passive health checks, NGINX and NGINX Plus monitor transactions as they happen, and try to resume failed connections. If the transaction still cannot be resumed, NGINX Open Source and NGINX Plus mark the server as unavailable and temporarily stop sending requests to it until it is marked active again. + +The conditions under which an upstream server is marked unavailable are defined for each upstream server with parameters to the [`server`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server) directive in the `upstream` block: + +- [`fail_timeout`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#fail_timeout) – Sets the time during which a number of failed attempts must happen for the server to be marked unavailable, and also the time for which the server is marked unavailable (default is 10 seconds). +- [`max_fails`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#max_fails) – Sets the number of failed attempts that must occur during the `fail_timeout` period for the server to be marked unavailable (default is 1 attempt). + +In the following example, if NGINX fails to send a request to a server or does not receive a response from it 3 times in 30 seconds, it marks the server as unavailable for 30 seconds: + +```nginx +upstream backend { + server backend1.example.com; + server backend2.example.com max_fails=3 fail_timeout=30s; +} +``` + +Note that if there is only a single server in a group, the `fail_timeout` and `max_fails` parameters are ignored and the server is never marked unavailable. 
+ + + +### Server Slow Start + +A recently recovered server can be easily overwhelmed by connections, which may cause the server to be marked as unavailable again. Slow start allows an upstream server to gradually recover its weight from zero to its nominal value after it has been recovered or became available. This can be done with the [`slow_start`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#slow_start) parameter of the upstream [`server`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#match) directive: + +```nginx +upstream backend { + server backend1.example.com slow_start=30s; + server backend2.example.com; + server 192.0.0.1 backup; +} +``` + +Note that if there is only a single server in a group, the `slow_start` parameter is ignored and the server is never marked unavailable. Slow start is exclusive to NGINX Plus. + + + +## Active Health Checks + +NGINX Plus can periodically check the health of upstream servers by sending special health‑check requests to each server and verifying the correct response. + +To enable active health checks: + +1. In the [`location`](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) that passes requests ([`proxy_pass`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass)) to an upstream group, include the [`health_check`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check) directive: + + ```nginx + server { + location / { + proxy_pass http://backend; + health_check; + } + } + ``` + + This snippet defines a server that passes all requests (`location /`) to the upstream group called `backend`. It also enables advanced health monitoring with the [`health_check`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check) directive: by default, every five seconds NGINX Plus sends a request for "**/**" to each server in the `backend` group. 
If any communication error or timeout occurs, or the server responds with a status code outside the range from `200` through `399`, the health check fails. The server is marked as unhealthy, and NGINX Plus does not send client requests to it until it once again passes a health check.
+
+    Optionally you can specify another port for health checks, for example, for monitoring health of many services on the same host. Specify a new port with the [`port`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check_port) parameter of the [`health_check`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check) directive:
+
+    ```nginx
+    server {
+        location / {
+            proxy_pass http://backend;
+            health_check port=8080;
+        }
+    }
+    ```
+
+2. In the upstream server group, define a shared memory zone with the [`zone`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) directive:
+
+    ```nginx
+    http {
+        upstream backend {
+            zone backend 64k;
+            server backend1.example.com;
+            server backend2.example.com;
+            server backend3.example.com;
+            server backend4.example.com;
+        }
+    }
+    ```
+
+    The zone is shared among all worker processes and stores the configuration of the upstream group. This [enables]({{< relref "/nginx/admin-guide/load-balancer/http-load-balancer.md#sharing-data-with-multiple-worker-processes" >}}) the worker processes to use the same set of counters to keep track of responses from the servers in the group.
+
+    The defaults for active health checks can be overridden with parameters to the `health_check` directive:
+
+    ```nginx
+    location / {
+        proxy_pass http://backend;
+        health_check interval=10 fails=3 passes=2;
+    }
+    ```
+
+    Here, the [`interval`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check_interval) parameter increases the delay between health checks from the default 5 seconds to 10 seconds.
The [`fails`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check_fails) parameter requires the server to fail three health checks to be marked as unhealthy (up from the default one). Finally, the [`passes`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check_passes) parameter means the server must pass two consecutive checks to be marked as healthy again instead of the default one. + + You can also enable connection caching with the [`keepalive_time`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check_keepalive_time) parameter - in case of TLS upstreams the full TLS handshake won't happen for every health check probe and the connection can be reused during the specified period of time: + + ```nginx + location / { + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_pass https://backend; + health_check interval=1 keepalive_time=60s; + } + ``` + + +### Specifying the Requested URI + +Use the [`uri`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check_uri) parameter of the [`health_check`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check) directive to set the URI to request in a health check: + +```nginx +location / { + proxy_pass http://backend; + health_check uri=/some/path; +} +``` + +The specified URI is appended to the server domain name or IP address set for the server in the `upstream` block. For the first server in the sample `backend` group declared above, a health check requests the URI **"http://backend1.example.com/some/path"**. + + + +### Defining Custom Conditions + +You can set custom conditions that the response must satisfy for the server to pass the health check. 
The conditions are defined in a [`match`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#match) block, which is referenced in the [`match`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check_match) parameter of the [`health_check`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check) directive. + +1. On the `http {}` level, specify the `match` `{}` block and name it, for example, `server_ok`: + + ```nginx + http { + #... + match server_ok { + # tests are here + } + } + ``` + +2. Refer to the block from the [`health_check`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check) directive by specifying the [`match`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check_match) parameter and the name of the [`match`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#match) block: + + ```nginx + http { + #... + match server_ok { + status 200-399; + body !~ "maintenance mode"; + } + server { + #... + location / { + proxy_pass http://backend; + health_check match=server_ok; + } + } + } + ``` + + Here the health check is passed if the status code of the response is in the range `200`–`399`, and its body does not contain the string `maintenance mode`. + +The `match` directive enables NGINX Plus to check the status code, header fields, and the body of a response. Using this directive it is possible to verify whether the status is in a specified range, whether a response includes a header, or whether the header or body matches a regular expression. The `match` directive can contain one status condition, one body condition, and multiple header conditions. A response must satisfy all conditions defined in `match` block for the server to pass the health check. 
+ +For example, the following `match` directive matches responses that have status code `200`, the exact value `text/html` in the `Content-Type` header, and the text `Welcome to nginx!` in the body: + +```nginx +match welcome { + status 200; + header Content-Type = text/html; + body ~ "Welcome to nginx!"; +} +``` + +The following example uses the exclamation point (`!`) to define characteristics the response must not have to pass the health check. In this case, the health check passes when the status code is something other than `301`, `302`, `303`, or `307`, and there is no `Refresh` header. + +```nginx +match not_redirect { + status ! 301-303 307; + header ! Refresh; +} +``` + + +### Mandatory Health Checks + +By default, when a new server is added to an upstream group, NGINX Plus considers it healthy and sends traffic to it immediately. But for some servers, particularly if they were added through the API interface or through DNS resolution, it would be good to perform health check first before allowing them to handle traffic. + +The [`mandatory`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check_mandatory) parameter requires every newly added server to pass all configured health checks before NGINX Plus sends traffic to it. + +When combined with [`slow start`](#slow_start), it gives a new server more time to connect to databases and “warm up” before being asked to handle their full share of traffic. + +Mandatory health checks can be marked as persistent, so that the previous state is remembered when reloading configuration. 
Specify the [`persistent`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check_persistent) parameter together with the [`mandatory`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check_mandatory) parameter:
+
+```nginx
+upstream my_upstream {
+    zone my_upstream 64k;
+    server backend1.example.com slow_start=30s;
+}
+
+server {
+    location / {
+        proxy_pass http://my_upstream;
+        health_check mandatory persistent;
+    }
+}
+```
+
+Here the `mandatory` and `persistent` parameters of the `health_check` directive and the `slow_start` parameter of the `server` directive are specified. Servers that are added to the upstream group using the API or DNS interfaces are marked as unhealthy and receive no traffic until they pass the health check; at that point they start receiving a gradually increasing amount of traffic over a span of 30 seconds. If the NGINX Plus configuration is reloaded and the server was marked as healthy before the reload, mandatory health checks are not performed and the server state is considered to be `up`.
+
+Health checks can also be enabled for non-HTTP protocols, such as [FastCGI](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html), [memcached](https://nginx.org/en/docs/http/ngx_http_memcached_module.html), [SCGI](https://nginx.org/en/docs/http/ngx_http_scgi_module.html), [uwsgi](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html), and also for [TCP and UDP](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check).
+
diff --git a/content/nginx/admin-guide/load-balancer/http-load-balancer.md b/content/nginx/admin-guide/load-balancer/http-load-balancer.md
new file mode 100644
index 000000000..25c2a2e99
--- /dev/null
+++ b/content/nginx/admin-guide/load-balancer/http-load-balancer.md
@@ -0,0 +1,447 @@
+---
+description: Load balance HTTP traffic across web or application server groups, with
+  several algorithms and advanced features like slow-start and session persistence.
+docs: DOCS-418 +doctypes: +- task +title: HTTP Load Balancing +toc: true +weight: 100 +--- + + +## Overview + +Load balancing across multiple application instances is a commonly used technique for optimizing resource utilization, maximizing throughput, reducing latency, and ensuring fault‑tolerant configurations. + +Watch the [F5 NGINX Plus for Load Balancing and Scaling](https://www.nginx.com/resources/webinars/nginx-plus-for-load-balancing-30-min/) webinar on demand for a deep dive on techniques that NGINX users employ to build large‑scale, highly available web services. + +NGINX and NGINX Plus can be used in different deployment scenarios as a [very efficient HTTP load balancer](https://www.nginx.com/blog/nginx-load-balance-deployment-models/). + + + +## Proxying HTTP Traffic to a Group of Servers + +To start using NGINX Plus or NGINX Open Source to load balance HTTP traffic to a group of servers, first you need to define the group with the [`upstream`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) directive. The directive is placed in the [`http`](https://nginx.org/en/docs/http/ngx_http_core_module.html#http) context. + +Servers in the group are configured using the [`server`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server) directive (not to be confused with the `server` block that defines a virtual server running on NGINX). 
For example, the following configuration defines a group named **backend** and consists of three server configurations (which may resolve in more than three actual servers): + +```nginx +http { + upstream backend { + server backend1.example.com weight=5; + server backend2.example.com; + server 192.0.0.1 backup; + } +} +``` + +To pass requests to a server group, the name of the group is specified in the [`proxy_pass`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) directive (or the [`fastcgi_pass`](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_pass), [`memcached_pass`](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_pass), [`scgi_pass`](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_pass), or [`uwsgi_pass`](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_pass) directives for those protocols.) In the next example, a virtual server running on NGINX passes all requests to the **backend** upstream group defined in the previous example: + +```nginx +server { + location / { + proxy_pass http://backend; + } +} +``` + +The following example combines the two snippets above and shows how to proxy HTTP requests to the **backend** server group. The group consists of three servers, two of them running instances of the same application while the third is a backup server. Because no load‑balancing algorithm is specified in the `upstream` block, NGINX uses the default algorithm, Round Robin: + +```nginx +http { + upstream backend { + server backend1.example.com; + server backend2.example.com; + server 192.0.0.1 backup; + } + + server { + location / { + proxy_pass http://backend; + } + } +} +``` + + + +## Choosing a Load-Balancing Method + +NGINX Open Source supports four load‑balancing methods, and NGINX Plus adds two more methods: + +1. Round Robin – Requests are distributed evenly across the servers, with [server weights](#weights) taken into consideration. 
This method is used by default (there is no directive for enabling it): + + ```nginx + upstream backend { + # no load balancing method is specified for Round Robin + server backend1.example.com; + server backend2.example.com; + } + ``` + +2. [Least Connections](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#least_conn) – A request is sent to the server with the least number of active connections, again with [server weights](#weights) taken into consideration: + + ```nginx + upstream backend { + least_conn; + server backend1.example.com; + server backend2.example.com; + } + ``` + +3. [IP Hash](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ip_hash) – The server to which a request is sent is determined from the client IP address. In this case, either the first three octets of the IPv4 address or the whole IPv6 address are used to calculate the hash value. The method guarantees that requests from the same address get to the same server unless it is not available. + + ```nginx + upstream backend { + ip_hash; + server backend1.example.com; + server backend2.example.com; + } + ``` + + If one of the servers needs to be temporarily removed from the load‑balancing rotation, it can be marked with the [down](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#down) parameter in order to preserve the current hashing of client IP addresses. Requests that were to be processed by this server are automatically sent to the next server in the group: + + ```nginx + upstream backend { + server backend1.example.com; + server backend2.example.com; + server backend3.example.com down; + } + ``` + +4. Generic [Hash](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#hash) – The server to which a request is sent is determined from a user‑defined key which can be a text string, variable, or a combination. 
For example, the key may be a paired source IP address and port, or a URI as in this example: + + ```nginx + upstream backend { + hash $request_uri consistent; + server backend1.example.com; + server backend2.example.com; + } + ``` + + The optional [consistent](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#hash) parameter to the `hash` directive enables [ketama](http://www.last.fm/user/RJ/journal/2007/04/10/rz_libketama_-_a_consistent_hashing_algo_for_memcache_clients) consistent‑hash load balancing. Requests are evenly distributed across all upstream servers based on the user‑defined hashed key value. If an upstream server is added to or removed from an upstream group, only a few keys are remapped which minimizes cache misses in the case of load‑balancing cache servers or other applications that accumulate state. + +5. [Least Time](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#least_time) (NGINX Plus only) – For each request, NGINX Plus selects the server with the lowest average latency and the lowest number of active connections, where the lowest average latency is calculated based on which of the following [parameters](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#least_time) to the `least_time` directive is included: + + - `header` – Time to receive the first byte from the server + - `last_byte` – Time to receive the full response from the server + - `last_byte inflight` – Time to receive the full response from the server, taking into account incomplete requests + + ```nginx + upstream backend { + least_time header; + server backend1.example.com; + server backend2.example.com; + } + ``` + +6. [Random](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#random) – Each request will be passed to a randomly selected server. 
+If the `two` parameter is specified, first, NGINX randomly selects two servers taking into account server weights, and then chooses one of these servers using the specified method: + + - `least_conn` – The least number of active connections + - `least_time=header` (NGINX Plus) – The least average time to receive the response header from the server ([`$upstream_header_time`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#var_upstream_header_time)) + - `least_time=last_byte` (NGINX Plus) – The least average time to receive the full response from the server ([`$upstream_response_time`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#var_upstream_response_time)) + + ```nginx + upstream backend { + random two least_time=last_byte; + server backend1.example.com; + server backend2.example.com; + server backend3.example.com; + server backend4.example.com; + } + ``` + + The **Random** load balancing method should be used for distributed environments where multiple load balancers are passing requests to the same set of backends. For environments where the load balancer has a full view of all requests, use other load balancing methods, such as round robin, least connections and least time. + +> **Note:** When configuring any method other than Round Robin, put the corresponding directive ([`hash`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#hash), [`ip_hash`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ip_hash), [`least_conn`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#least_conn), [`least_time`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#least_time), or [`random`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#random)) above the list of `server` directives in the [`upstream {}`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) block. 
+ + + +## Server Weights + +By default, NGINX distributes requests among the servers in the group according to their weights using the Round Robin method. The [`weight`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#weight) parameter to the [`server`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server) directive sets the weight of a server; the default is `1`: + +```nginx +upstream backend { + server backend1.example.com weight=5; + server backend2.example.com; + server 192.0.0.1 backup; +} +``` + +In the example, **backend1.example.com** has weight `5`; the other two servers have the default weight (`1`), but the one with IP address `192.0.0.1` is marked as a `backup` server and does not receive requests unless both of the other servers are unavailable. With this configuration of weights, out of every `6` requests, `5` are sent to **backend1.example.com** and `1` to **backend2.example.com**. + + + +## Server Slow-Start + +The server slow‑start feature prevents a recently recovered server from being overwhelmed by connections, which may time out and cause the server to be marked as failed again. + +In NGINX Plus, slow‑start allows an upstream server to gradually recover its weight from `0` to its nominal value after it has been recovered or became available. This can be done with the [`slow_start`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#slow_start) parameter to the `server` directive: + +```nginx +upstream backend { + server backend1.example.com slow_start=30s; + server backend2.example.com; + server 192.0.0.1 backup; +} +``` + +The time value (here, `30` seconds) sets the time during which NGINX Plus ramps up the number of connections to the server to the full value. 
+
+Note that if there is only a single server in a group, the [`max_fails`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#max_fails), [`fail_timeout`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#fail_timeout), and [`slow_start`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#slow_start) parameters to the `server` directive are ignored and the server is never considered unavailable.
+
+
+
+## Enabling Session Persistence
+
+Session persistence means that NGINX Plus identifies user sessions and routes all requests in a given session to the same upstream server.
+
+NGINX Plus supports three session persistence methods. The methods are set with the [`sticky`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky) directive. (For session persistence with NGINX Open Source, use the `hash` or `ip_hash` directive as described [above](#method).)
+
+- [Sticky cookie](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky_cookie) – NGINX Plus adds a session cookie to the first response from the upstream group and identifies the server that sent the response. The client's next request contains the cookie value and NGINX Plus routes the request to the upstream server that responded to the first request:
+
+    ```nginx
+    upstream backend {
+        server backend1.example.com;
+        server backend2.example.com;
+        sticky cookie srv_id expires=1h domain=.example.com path=/;
+    }
+    ```
+
+    In the example, the `srv_id` parameter sets the name of the cookie. The optional `expires` parameter sets the time for the browser to keep the cookie (here, `1` hour). The optional `domain` parameter defines the domain for which the cookie is set, and the optional `path` parameter defines the path for which the cookie is set. This is the simplest session persistence method.
+
+- [Sticky route](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky_route) – NGINX Plus assigns a “route” to the client when it receives the first request. All subsequent requests are compared to the [`route`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#route) parameter of the `server` directive to identify the server to which the request is proxied. The route information is taken from either a cookie or the request URI.
+
+    ```nginx
+    upstream backend {
+        server backend1.example.com route=a;
+        server backend2.example.com route=b;
+        sticky route $route_cookie $route_uri;
+    }
+    ```
+
+- [Sticky learn](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky_learn) method – NGINX Plus first finds session identifiers by inspecting requests and responses. Then NGINX Plus “learns” which upstream server corresponds to which session identifier. Generally, these identifiers are passed in an HTTP cookie. If a request contains a session identifier already “learned”, NGINX Plus forwards the request to the corresponding server:
+
+    ```nginx
+    upstream backend {
+        server backend1.example.com;
+        server backend2.example.com;
+        sticky learn
+            create=$upstream_cookie_examplecookie
+            lookup=$cookie_examplecookie
+            zone=client_sessions:1m
+            timeout=1h;
+    }
+    ```
+
+    In the example, one of the upstream servers creates a session by setting the cookie `EXAMPLECOOKIE` in the response.
+
+    The mandatory `create` parameter specifies a variable that indicates how a new session is created. In the example, new sessions are created from the cookie `EXAMPLECOOKIE` sent by the upstream server.
+
+    The mandatory `lookup` parameter specifies how to search for existing sessions. In our example, existing sessions are searched in the cookie `EXAMPLECOOKIE` sent by the client.
+
+    The mandatory `zone` parameter specifies a shared memory zone where all information about sticky sessions is kept.
In our example, the zone is named **client_sessions** and is `1` megabyte in size.
+
+    This is a more sophisticated session persistence method than the previous two as it does not require keeping any cookies on the client side: all information is kept server‑side in the shared memory zone.
+
+    If there are several NGINX instances in a cluster that use the "sticky learn" method, it is possible to sync the contents of their shared memory zones on condition that:
+    - the zones have the same name
+    - the [`zone_sync`](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync) functionality is configured on each instance
+    - the `sync` parameter is specified
+
+    ```nginx
+    upstream backend {
+        server backend1.example.com;
+        server backend2.example.com;
+        sticky learn
+            create=$upstream_cookie_examplecookie
+            lookup=$cookie_examplecookie
+            zone=client_sessions:1m
+            timeout=1h
+            sync;
+    }
+    ```
+
+    See [Runtime State Sharing in a Cluster]({{< relref "../high-availability/zone_sync.md" >}}) for details.
+
+
+
+## Limiting the Number of Connections
+
+With NGINX Plus, it is possible to limit the number of active connections to an upstream server by specifying the maximum number with the [`max_conns`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#max_conns) parameter.
+
+If the `max_conns` limit has been reached, the request is placed in a queue for further processing, provided that the [`queue`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#queue) directive is also included to set the maximum number of requests that can be simultaneously in the queue:
+
+```nginx
+upstream backend {
+    server backend1.example.com max_conns=3;
+    server backend2.example.com;
+    queue 100 timeout=70;
+}
+```
+
+If the queue is filled up with requests or the upstream server cannot be selected during the timeout specified by the optional `timeout` parameter, the client receives an error.
+ +Note that the `max_conns` limit is ignored if there are idle [`keepalive`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive) connections opened in other [`worker processes`](https://nginx.org/en/docs/ngx_core_module.html#worker_processes). As a result, the total number of connections to the server might exceed the `max_conns` value in a configuration where the memory is [shared with multiple worker processes](#zone). + + + +## Configuring Health Checks + +NGINX can continually test your HTTP upstream servers, avoid the servers that have failed, and gracefully add the recovered servers into the load‑balanced group. + +See [HTTP Health Checks]({{< relref "http-health-check.md" >}}) for instructions how to configure health checks for HTTP. + + + +## Sharing Data with Multiple Worker Processes + +If an [`upstream`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) block does not include the [`zone`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) directive, each worker process keeps its own copy of the server group configuration and maintains its own set of related counters. The counters include the current number of connections to each server in the group and the number of failed attempts to pass a request to a server. As a result, the server group configuration cannot be modified dynamically. + +When the `zone` directive is included in an `upstream` block, the configuration of the upstream group is kept in a memory area shared among all worker processes. This scenario is dynamically configurable, because the worker processes access the same copy of the group configuration and utilize the same related counters. + +The `zone` directive is mandatory for [active health checks](#health_active) and [dynamic reconfiguration]({{< relref "dynamic-configuration-api.md" >}}) of the upstream group. However, other features of upstream groups can benefit from the use of this directive as well. 
+ +For example, if the configuration of a group is not shared, each worker process maintains its own counter for failed attempts to pass a request to a server (set by the [max_fails](#health_passive) parameter). In this case, each request gets to only one worker process. When the worker process that is selected to process a request fails to transmit the request to a server, other worker processes don’t know anything about it. While some worker process can consider a server unavailable, others might still send requests to this server. For a server to be definitively considered unavailable, the number of failed attempts during the timeframe set by the `fail_timeout` parameter must equal `max_fails` multiplied by the number of worker processes. On the other hand, the `zone` directive guarantees the expected behavior. + +Similarly, the [Least Connections](#method) load‑balancing method might not work as expected without the `zone` directive, at least under low load. This method passes a request to the server with the smallest number of active connections. If the configuration of the group is not shared, each worker process uses its own counter for the number of connections and might send a request to the same server that another worker process just sent a request to. However, you can increase the number of requests to reduce this effect. Under high load requests are distributed among worker processes evenly, and the `Least Connections` method works as expected. + + + +### Setting the Zone Size + +It is not possible to recommend an ideal memory‑zone size, because usage patterns vary widely. The required amount of memory is determined by which features (such as [session persistence](#sticky), [health checks](#health_active), or [DNS re‑resolving](#resolve)) are enabled and how the upstream servers are identified. 
+ +As an example, with the `sticky_route` session persistence method and a single health check enabled, a 256‑KB zone can accommodate information about the indicated number of upstream servers: + +- 128 servers (each defined as an IP‑address:port pair) +- 88 servers (each defined as hostname:port pair where the hostname resolves to a single IP address) +- 12 servers (each defined as hostname:port pair where the hostname resolves to multiple IP addresses) + + + +## Configuring HTTP Load Balancing Using DNS + +The configuration of a server group can be modified at runtime using DNS. + +For servers in an upstream group that are identified with a domain name in the [`server`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server) directive, NGINX Plus can monitor changes to the list of IP addresses in the corresponding DNS record, and automatically apply the changes to load balancing for the upstream group, without requiring a restart. This can be done by including the [`resolver`](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver) directive in the [`http`](https://nginx.org/en/docs/http/ngx_http_core_module.html#http) block along with the [`resolve`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#resolve) parameter to the `server` directive: + +```nginx +http { + resolver 10.0.0.1 valid=300s ipv6=off; + resolver_timeout 10s; + server { + location / { + proxy_pass http://backend; + } + } + upstream backend { + zone backend 32k; + least_conn; + # ... + server backend1.example.com resolve; + server backend2.example.com resolve; + } +} +``` + +In the example, the `resolve` parameter to the `server` directive tells NGINX Plus to periodically re‑resolve the **backend1.example.com** and **backend2.example.com** domain names into IP addresses. + +The [`resolver`](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver) directive defines the IP address of the DNS server to which NGINX Plus sends requests (here, `10.0.0.1`). 
By default, NGINX Plus re‑resolves DNS records at the frequency specified by time‑to‑live (TTL) in the record, but you can override the TTL value with the `valid` parameter; in the example it is `300` seconds, or `5` minutes. + +The optional `ipv6=off` parameter means only IPv4 addresses are used for load balancing, though resolving of both IPv4 and IPv6 addresses is supported by default. + +If a domain name resolves to several IP addresses, the addresses are saved to the upstream configuration and load balanced. In our example, the servers are load balanced according to the [Least Connections](#method) load‑balancing method. If the list of IP addresses for a server has changed, NGINX Plus immediately starts load balancing across the new set of addresses. + + + +## Load Balancing of Microsoft Exchange Servers + +In [NGINX Plus Release 7](https://docs.nginx.com/nginx/releases/#nginxplus-release7-r7) and later, NGINX Plus can proxy Microsoft Exchange traffic to a server or a group of servers and load balance it. + +To set up load balancing of Microsoft Exchange servers: + +1. In a `location` block, configure proxying to the upstream group of Microsoft Exchange servers with the [`proxy_pass`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) directive: + + ```nginx + location / { + proxy_pass https://exchange; + # ... + } + ``` + +2. In order for Microsoft Exchange connections to pass to the upstream servers, in the `location` block set the [`proxy_http_version`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version) directive value to `1.1`, and the [`proxy_set_header`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) directive to `Connection ""`, just like for a keepalive connection: + + ```nginx + location / { + # ... + proxy_http_version 1.1; + proxy_set_header Connection ""; + # ... + } + ``` + +3. 
In the `http` block, configure a upstream group of Microsoft Exchange servers with an [`upstream`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) block named the same as the upstream group specified with the [`proxy_pass`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) directive in Step 1. Then specify the [`ntlm`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ntlm) directive to allow the servers in the group to accept requests with NTLM authentication: + + ```nginx + http { + # ... + upstream exchange { + zone exchange 64k; + ntlm; + # ... + } + } + ``` + +4. Add Microsoft Exchange servers to the upstream group and optionally specify a [load‑balancing method](#method): + + ```nginx + http { + # ... + upstream exchange { + zone exchange 64k; + ntlm; + server exchange1.example.com; + server exchange2.example.com; + # ... + } + } + ``` + + + +### Complete NTLM Example + +```nginx +http { + # ... + upstream exchange { + zone exchange 64k; + ntlm; + server exchange1.example.com; + server exchange2.example.com; + } + + server { + listen 443 ssl; + ssl_certificate /etc/nginx/ssl/company.com.crt; + ssl_certificate_key /etc/nginx/ssl/company.com.key; + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + + location / { + proxy_pass https://exchange; + proxy_http_version 1.1; + proxy_set_header Connection ""; + } + } +} +``` + +For more information about configuring Microsoft Exchange and NGINX Plus, see the [Load Balancing Microsoft Exchange Servers with NGINX Plus]({{< relref "/nginx/deployment-guides/load-balance-third-party/microsoft-exchange.md" >}}) deployment guide. + + + +## Dynamic Configuration Using the NGINX Plus API + +With NGINX Plus, the configuration of an upstream server group can be modified dynamically using the NGINX Plus API. A configuration command can be used to view all servers or a particular server in a group, modify parameter for a particular server, and add or remove servers. 
For more information and instructions, see [Configuring Dynamic Load Balancing with the NGINX Plus API]({{< relref "dynamic-configuration-api.md" >}}). diff --git a/content/nginx/admin-guide/load-balancer/tcp-health-check.md b/content/nginx/admin-guide/load-balancer/tcp-health-check.md new file mode 100644 index 000000000..2d968416e --- /dev/null +++ b/content/nginx/admin-guide/load-balancer/tcp-health-check.md @@ -0,0 +1,251 @@ +--- +description: Monitor the health of TCP servers in an upstream group by sending periodic + health checks, including customizable active health checks in F5 NGINX Plus. +docs: DOCS-419 +doctypes: +- task +title: TCP Health Checks +toc: true +weight: 400 +--- + + +## Introduction + +NGINX and F5 NGINX Plus can continually test your TCP upstream servers, avoid the servers that have failed, and gracefully add the recovered servers into the load‑balanced group. + + +## Prerequisites + +- You have configured an upstream group of TCP servers in the [`stream`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) context, for example: + + ```nginx + stream { + #... + upstream stream_backend { + server backend1.example.com:12345; + server backend2.example.com:12345; + server backend3.example.com:12345; + } + #... + } + ``` + +- You have configured a server that passes TCP connections to the server group: + + ```nginx + stream { + #... + server { + listen 12345; + proxy_pass stream_backend; + } + #... + } + ``` + + +## Passive TCP Health Checks + +If an attempt to connect to an upstream server times out or results in an error, NGINX Open Source or NGINX Plus can mark the server as unavailable and stop sending requests to it for a defined amount of time. 
To define the conditions under which NGINX considers an upstream server unavailable, include the following parameters to the [`server`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#server) directive + +- [`fail_timeout`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#fail_timeout) – The amount of time within which a specified number of connection attempts must fail for the server to be considered unavailable. Also, the amount of time that NGINX considers the server unavailable after marking it so. +- [`max_fails`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#max_fails) – The number of failed attempts that happen during the specified time for NGINX to consider the server unavailable. + +The default values are `10` seconds and `1` attempt. So if a connection attempt times out or fails at least once in a 10‑second period, NGINX marks the server as unavailable for 10 seconds. The example shows how to set these parameters to 2 failures within 30 seconds: + +```nginx +upstream stream_backend { + server backend1.example.com:12345 weight=5; + server backend2.example.com:12345 max_fails=2 fail_timeout=30s; + server backend3.example.com:12346 max_conns=3; +} +``` + + +### Server Slow Start + +A recently recovered upstream server can be easily overwhelmed by connections, which may cause the server to be marked as unavailable again. Slow start allows an upstream server to gradually recover its weight from zero to its nominal value after it has been recovered or became available. 
This can be done with the [`slow_start`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#slow_start) parameter of the upstream [`server`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#server) directive: + +```nginx +upstream backend { + server backend1.example.com:12345 slow_start=30s; + server backend2.example.com; + server 192.0.0.1 backup; +} +``` + +Note that if there is only a single server in a group, the `slow_start` parameter is ignored and the server is never marked unavailable. Slow start is exclusive to NGINX Plus. + + +## Active TCP Health Checks + +Health checks can be configured to test a wide range of failure types. For example, NGINX Plus can continually test upstream servers for responsiveness and avoid servers that have failed. + +NGINX Plus sends special health check requests to each upstream server and checks for a response that satisfies certain conditions. If a connection to the server cannot be established, the health check fails, and the server is considered unhealthy. NGINX Plus does not proxy client connections to unhealthy servers. If several health checks are configured for an upstream group, the failure of any check is enough to consider the corresponding server unhealthy. + +To enable active health checks: + +1. Specify a _shared memory zone_ – a special area where the NGINX Plus worker processes share state information about counters and connections. Add the [`zone`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#zone) directive to the upstream server group and specify the zone name (here, **stream_backend**) and the amount of memory (64 KB): + + ```nginx + stream { + #... + upstream stream_backend { + zone stream_backend 64k; + server backend1.example.com:12345; + server backend2.example.com:12345; + server backend3.example.com:12345; + } + #... + } + ``` + +2. 
Enable active health checks for the upstream group with the [`health_check`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check) directive: + + ```nginx + stream { + #... + server { + listen 12345; + proxy_pass stream_backend; + health_check; + #... + } + } + ``` + +3. If necessary, reduce a timeout between two consecutive health checks with the [`health_check_timeout`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check_timeout) directive. This directive overrides the [`proxy_timeout`](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_timeout) value for health checks, as for health checks this timeout needs to be significantly shorter: + + ```nginx + stream { + #... + server { + listen 12345; + proxy_pass stream_backend; + health_check; + health_check_timeout 5s; + } + } + ``` + +4. By default, NGINX Plus sends health check messages to the port specified by the [`server`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#server) directive in the [`upstream`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#upstream) block. You can specify another port for health checks, which is particularly helpful when monitoring the health of many services on the same host. To override the port, specify the [`port`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check_port) parameter of the [`health_check`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check) directive: + + ```nginx + stream { + #... + server { + listen 12345; + proxy_pass stream_backend; + health_check port=12346; + health_check_timeout 5s; + } + } + ``` + + +### Fine-Tuning TCP Health Checks + +By default, NGINX Plus tries to connect to each server in an upstream server group every `5` seconds. 
If the connection cannot be established, NGINX Plus considers the health check failed, marks the server as unhealthy, and stops forwarding client connections to the server. + +To change the default behavior, include parameters to the [`health_check`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check) directive: + +- [`interval`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check_interval) – How often (in seconds) NGINX Plus sends health check requests (default is `5` seconds) +- [`passes`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check_passes) – Number of consecutive health checks the server must respond to to be considered healthy (default is `1`) +- [`fails`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check_fails) – Number of consecutive health checks the server must fail to respond to to be considered unhealthy (default is `1`) + + ```nginx + stream { + #... + server { + listen 12345; + proxy_pass stream_backend; + health_check interval=10 passes=2 fails=3; + } + #... + } + ``` + + In the example, the time between TCP health checks is increased to `10` seconds, the server is considered unhealthy after `3` consecutive failed health checks, and the server needs to pass `2` consecutive checks to be considered healthy again. + + + +### The “match {}” Configuration Block + +You can create your own tests to verify server responses to health checks. These tests are defined with the [`match {}`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#match) configuration block placed in the [`stream {}`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) context. + + +1. 
On the [`stream {}`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) level, specify the [`match {}`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#match) block and name it, for example, `tcp_test`:
+
+    ```nginx
+    stream {
+        #...
+        match tcp_test {
+            #...
+        }
+    }
+    ```
+
+    This block will contain tests described in **Step 3**.
+
+2. Refer to the block from the [`health_check`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check) directive by specifying the [`match`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check_match) parameter and the name of the [`match`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#match) block:
+
+    ```nginx
+    stream {
+        #...
+        server {
+            listen 12345;
+            health_check match=tcp_test;
+            proxy_pass stream_backend;
+        }
+        #...
+    }
+    ```
+
+3. Within the [`match`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#match) block, specify the conditions or tests under which a health check succeeds. The block can accept the following parameters:
+
+    - [`send`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#match_send) – The text string or hexadecimal literals (“\x” followed by two hex digits) to send to the server
+    - [`expect`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#match_expect) – Literal string or regular expression that the data returned by the server needs to match
+
+    These parameters can be used in different combinations, but no more than one `send` and one `expect` parameter can be specified at a time:
+
+    - If no `send` or `expect` parameters are specified, the ability to connect to the server is tested.
+
+    - If the `expect` parameter is specified, the server is expected to unconditionally send data first:
+
+        ```nginx
+        match pop3 {
+            expect ~* "\+OK";
+        }
+        ```
+
+    - If the `send` parameter is specified, it is expected that the connection will be successfully established and the specified string will be sent to the server:
+
+        ```nginx
+        match pop_quit {
+            send QUIT;
+        }
+        ```
+
+    - If both the `send` and `expect` parameters are specified, then the string from the `send` parameter is sent and the data returned by the server must match the regular expression from the `expect` parameter:
+
+        ```nginx
+        stream {
+            #...
+            upstream stream_backend {
+                zone upstream_backend 64k;
+                server backend1.example.com:12345;
+            }
+            match http {
+                send "GET / HTTP/1.0\r\nHost: localhost\r\n\r\n";
+                expect ~* "200 OK";
+            }
+            server {
+                listen 12345;
+                health_check match=http;
+                proxy_pass stream_backend;
+            }
+        }
+        ```
+
+    The example shows that in order for a health check to pass, the HTTP request must be sent to the server, and the expected result from the server contains `200` `OK` to indicate a successful HTTP response.
diff --git a/content/nginx/admin-guide/load-balancer/tcp-udp-load-balancer.md b/content/nginx/admin-guide/load-balancer/tcp-udp-load-balancer.md
new file mode 100644
index 000000000..a4353f383
--- /dev/null
+++ b/content/nginx/admin-guide/load-balancer/tcp-udp-load-balancer.md
@@ -0,0 +1,473 @@
+---
+description: This chapter describes how to use F5 NGINX Plus and NGINX Open Source to
+  proxy and load balance TCP and UDP traffic.
+docs: DOCS-420
+doctypes:
+- task
+title: TCP and UDP Load Balancing
+toc: true
+weight: 200
+---
+
+
+## Introduction
+
+[Load balancing](https://www.nginx.com/solutions/load-balancing/) refers to efficiently distributing network traffic across multiple backend servers.
+
+In F5 NGINX Plus Release 5 and later, NGINX Plus can proxy and load balance Transmission Control Protocol (TCP) traffic.
TCP is the protocol for many popular applications and services, such as LDAP, MySQL, and RTMP. + +In NGINX Plus Release 9 and later, NGINX Plus can proxy and load balance UDP traffic. UDP (User Datagram Protocol) is the protocol for many popular non-transactional applications, such as DNS, syslog, and RADIUS. + +To load balance HTTP traffic, refer to the [HTTP Load Balancing]({{< relref "http-load-balancer.md" >}}) article. + + +## Prerequisites + +- Latest NGINX Plus (no extra build steps required) or latest [NGINX Open Source](https://nginx.org/en/download.html) built with the `--with-stream` configuration flag +- An application, database, or service that communicates over TCP or UDP +- Upstream servers, each running the same instance of the application, database, or service + + +## Configuring Reverse Proxy + +First, you will need to configure _reverse proxy_ so that NGINX Plus or NGINX Open Source can forward TCP connections or UDP datagrams from clients to an upstream group or a proxied server. + +Open the NGINX configuration file and perform the following steps: + +1. Create a top‑level [`stream {}`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) block: + + ```nginx + stream { + # ... + } + ``` + +2. Define one or more [`server {}`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#server) configuration blocks for each virtual server in the top‑level `stream {}` context. + +3. Within the `server {}` configuration block for each server, include the [`listen`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#listen) directive to define the _IP address_ and/or _port_ on which the server listens. + + For UDP traffic, also include the [`udp`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#udp) parameter. As TCP is the default protocol for the `stream` context, there is no `tcp` parameter to the `listen` directive: + + ```nginx + stream { + + server { + listen 12345; + # ... 
+ } + + server { + listen 53 udp; + # ... + } + # ... + } + ``` + +4. Include the [`proxy_pass`](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_pass) directive to define the proxied server or an upstream group to which the server forwards traffic: + + ```nginx + stream { + + server { + listen 12345; + #TCP traffic will be forwarded to the "stream_backend" upstream group + proxy_pass stream_backend; + } + + server { + listen 12346; + #TCP traffic will be forwarded to the specified server + proxy_pass backend.example.com:12346; + } + + server { + listen 53 udp; + #UDP traffic will be forwarded to the "dns_servers" upstream group + proxy_pass dns_servers; + } + # ... + } + ``` + +5. If the proxy server has several network interfaces, you can optionally configure NGINX to use a particular source IP address when connecting to an upstream server. This may be useful if a proxied server behind NGINX is configured to accept connections from particular IP networks or IP address ranges. + + Include the [`proxy_bind`](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_bind) directive and the IP address of the appropriate network interface: + + ```nginx + stream { + # ... + server { + listen 127.0.0.1:12345; + proxy_pass backend.example.com:12345; + proxy_bind 127.0.0.1:12345; + } + } + ``` + +6. Optionally, you can tune the size of two in‑memory buffers where NGINX can put data from both the client and upstream connections. If there is a small volume of data, the buffers can be reduced which may save memory resources. If there is a large volume of data, the buffer size can be increased to reduce the number of socket read/write operations. As soon as data is received on one connection, NGINX reads it and forwards it over the other connection. The buffers are controlled with the [`proxy_buffer_size`](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_buffer_size) directive: + + ```nginx + stream { + # ... 
+ server { + listen 127.0.0.1:12345; + proxy_pass backend.example.com:12345; + proxy_buffer_size 16k; + } + } + ``` + + +## Configuring TCP or UDP Load Balancing + +To configure load balancing: + +1. Create a group of servers, or an _upstream group_ whose traffic will be load balanced. Define one or more [`upstream {}`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#upstream) configuration blocks in the top‑level [`stream {}`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) context and set the name for the upstream group, for example, `stream_backend` for TCP servers and `dns_servers` for UDP servers: + + ```nginx + stream { + + upstream stream_backend { + # ... + } + + upstream dns_servers { + # ... + } + + # ... + } + ``` + + Make sure that the name of the upstream group is referenced by a [`proxy_pass`](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_pass) directive, like those configured [above](#proxy_pass) for reverse proxy. + +2. Populate the upstream group with _upstream servers_. Within the [`upstream {}`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#upstream) block, add a [`server`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#server) directive for each upstream server, specifying its IP address or hostname (which can resolve to multiple IP addresses) and an _obligatory_ port number. Note that you do not define the protocol for each server, because that is defined for the entire upstream group by the parameter you include on the `listen` directive in the `server` block, which you have created [earlier](#proxy_pass). + + ```nginx + stream { + + upstream stream_backend { + server backend1.example.com:12345; + server backend2.example.com:12345; + server backend3.example.com:12346; + # ... + } + + upstream dns_servers { + server 192.168.136.130:53; + server 192.168.136.131:53; + # ... + } + + # ... + } + ``` + +3. 
Configure the load‑balancing method used by the upstream group. You can specify one of the following methods: + + - Round Robin – By default, NGINX uses the Round Robin algorithm to load balance traffic, directing it sequentially to the servers in the configured upstream group. Because it is the default method, there is no `round‑robin` directive; simply create an `upstream {}` configuration block in the top‑level `stream {}` context and add `server` directives as described in the previous step. + + - [Least Connections](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#least_conn) – NGINX selects the server with the smaller number of current active connections. + + - [Least Time](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#least_time) (NGINX Plus only) – NGINX Plus selects the server with the lowest average latency and the least number of active connections. The method used to calculate lowest average latency depends on which of the following parameters is included on the `least_time` directive: + + - `connect` – Time to connect to the upstream server + - `first_byte` – Time to receive the first byte of data + - `last_byte` – Time to receive the full response from the server + + ```nginx + upstream stream_backend { + least_time first_byte; + server backend1.example.com:12345; + server backend2.example.com:12345; + server backend3.example.com:12346; + } + ``` + + - [Hash](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#hash) – NGINX selects the server based on a user‑defined key, for example, the source IP address ([`$remote_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_remote_addr)): + + ```nginx + upstream stream_backend { + hash $remote_addr; + server backend1.example.com:12345; + server backend2.example.com:12345; + server backend3.example.com:12346; + } + ``` + + The `Hash` load‑balancing method is also used to configure _session persistence_. 
As the hash function is based on client IP address, connections from a given client are always passed to the same server unless the server is down or otherwise unavailable. Specify an optional `consistent` parameter to apply the [ketama](http://www.last.fm/user/RJ/journal/2007/04/10/rz_libketama_-_a_consistent_hashing_algo_for_memcache_clients) consistent hashing method: + + ```nginx + hash $remote_addr consistent; + ``` + + - [Random](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#random) – Each connection will be passed to a randomly selected server. If the `two` parameter is specified, first, NGINX randomly selects two servers taking into account server weights, and then chooses one of these servers using the specified method: + + - `least_conn` – The least number of active connections + - `least_time=connect` (NGINX Plus) – The time to connect to the upstream server ([`$upstream_connect_time`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#var_upstream_connect_time)) + - `least_time=first_byte` (NGINX Plus) – The least average time to receive the first byte of data from the server ([`$upstream_first_byte_time`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#var_upstream_first_byte_time)) + - `least_time=last_byte` (NGINX Plus) – The least average time to receive the last byte of data from the server ([`$upstream_session_time`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#var_upstream_session_time)) + + ```nginx + upstream stream_backend { + random two least_time=last_byte; + server backend1.example.com:12345; + server backend2.example.com:12345; + server backend3.example.com:12346; + server backend4.example.com:12346; + } + ``` + + The **Random** load balancing method should be used for distributed environments where multiple load balancers are passing requests to the same set of backends. 
For environments where the load balancer has a full view of all requests, use other load balancing methods, such as round robin, least connections and least time.
+
+4. Optionally, for each upstream server specify server‑specific parameters including [maximum number of connections](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#max_conns), [server weight](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#weight), and so on:
+
+    ```nginx
+    upstream stream_backend {
+        hash $remote_addr consistent;
+        server backend1.example.com:12345 weight=5;
+        server backend2.example.com:12345;
+        server backend3.example.com:12346 max_conns=3;
+    }
+    upstream dns_servers {
+        least_conn;
+        server 192.168.136.130:53;
+        server 192.168.136.131:53;
+        # ...
+    }
+    ```
+
+An alternative approach is to proxy traffic to a single server instead of an upstream group. If you identify the server by hostname, and configure the hostname to resolve to multiple IP addresses, then NGINX load balances traffic across the IP addresses using the `Round Robin` algorithm. In this case, you _must_ specify the server’s port number in the `proxy_pass` directive and _must not_ specify the protocol before IP address or hostname:
+
+```nginx
+stream {
+    # ...
+    server {
+        listen 12345;
+        proxy_pass backend.example.com:12345;
+    }
+}
+```
+
+
+## Configuring Health Checks
+
+NGINX can continually test your TCP or UDP upstream servers, avoid the servers that have failed, and gracefully add the recovered servers into the load‑balanced group.
+
+See [TCP Health Checks]({{< relref "tcp-health-check.md" >}}) for instructions how to configure health checks for TCP.
+
+See [UDP Health Checks]({{< relref "udp-health-check.md" >}}) for instructions how to configure health checks for UDP.
+
+
+## On-the-Fly Configuration
+
+Upstream server groups can be easily reconfigured on-the-fly using NGINX Plus REST API.
Using this interface, you can view all servers in an upstream group or a particular server, modify server parameters, and add or remove upstream servers. + +To enable on-the-fly configuration: + +1. Create the top-level `http {}` block or make sure it is present in your configuration: + + ```nginx + http { + # ... + } + ``` + +2. Create a location for configuration requests, for example, _api_: + + ```nginx + http { + server { + location /api { + # ... + } + } + } + ``` + +3. In this location specify the [`api`](https://nginx.org/en/docs/http/ngx_http_api_module.html#api) directive: + + ```nginx + http { + server { + location /api { + api; + # ... + } + } + } + ``` + +4. By default, the NGINX Plus API provides read-only access to data. The `write=on` parameter enables read/write access so that changes can be made to upstreams: + + ```nginx + http { + server { + location /api { + api write=on; + # ... + } + } + } + ``` + +5. Limit access to this location with [`allow`](https://nginx.org/en/docs/http/ngx_http_access_module.html#allow) and [`deny`](https://nginx.org/en/docs/http/ngx_http_access_module.html#deny) directives: + + ```nginx + http { + server { + location /api { + api write=on; + allow 127.0.0.1; # permit access from localhost + deny all; # deny access from everywhere else + } + } + } + ``` + +6. When the API is enabled in the write mode, it is recommended restricting access to `PATCH`, `POST`, and `DELETE` methods to particular users. This can be done by implementing [HTTP basic authentication](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html): + + ```nginx + http { + server { + location /api { + limit_except GET { + auth_basic "NGINX Plus API"; + auth_basic_user_file /path/to/passwd/file; + } + api write=on; + allow 127.0.0.1; + deny all; + } + } + } + ``` + +7. Create a _shared memory zone_ for the group of upstream servers so that all worker processes can use the same configuration. 
To do this, in the top-level [`stream {}`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) block, find the target upstream group, add the [`zone`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#zone) directive to the upstream server group and specify the zone name (here, **stream_backend**) and the amount of memory (64 KB):
+
+    ```nginx
+    stream {
+        upstream stream_backend {
+            zone stream_backend 64k;
+            # ...
+        }
+    }
+    ```
+
+
+### On-the-Fly Configuration Example
+
+```nginx
+stream {
+    # ...
+    # Configuration of an upstream server group
+    upstream appservers {
+        zone appservers 64k;
+        server appserv1.example.com:12345 weight=5;
+        server appserv2.example.com:12345 fail_timeout=5s;
+        server backup1.example.com:12345 backup;
+        server backup2.example.com:12345 backup;
+    }
+
+    server {
+        # Server that proxies connections to the upstream group
+        proxy_pass appservers;
+        health_check;
+    }
+}
+http {
+    # ...
+    server {
+        # Location for API requests
+        location /api {
+            limit_except GET {
+                auth_basic "NGINX Plus API";
+                auth_basic_user_file /path/to/passwd/file;
+            }
+            api write=on;
+            allow 127.0.0.1;
+            deny all;
+        }
+    }
+}
+```
+
+Here, access to the location is allowed only from the localhost address (`127.0.0.1`). Access from all other IP addresses is denied.
+
+To pass a configuration command to NGINX, send an API command by any method, for example, with curl.
+ +For example, to add a new server to the server group, send a `POST` request: + +```shell +curl -X POST -d '{ \ + "server": "appserv3.example.com:12345", \ + "weight": 4 \ + }' -s 'http://127.0.0.1/api/6/stream/upstreams/appservers/servers' +``` + +To remove a server from the server group, send a `DELETE` request: + +```shell +curl -X DELETE -s 'http://127.0.0.1/api/6/stream/upstreams/appservers/servers/0' +``` + +To modify a parameter for a specific server, send a `PATCH` request: + +```shell +curl -X PATCH -d '{ "down": true }' -s 'http://127.0.0.1/api/6/http/upstreams/appservers/servers/0' +``` + + +## Example of TCP and UDP Load-Balancing Configuration + +This is a configuration example of TCP and UDP load balancing with NGINX: + +```nginx +stream { + upstream stream_backend { + least_conn; + server backend1.example.com:12345 weight=5; + server backend2.example.com:12345 max_fails=2 fail_timeout=30s; + server backend3.example.com:12345 max_conns=3; + } + + upstream dns_servers { + least_conn; + server 192.168.136.130:53; + server 192.168.136.131:53; + server 192.168.136.132:53; + } + + server { + listen 12345; + proxy_pass stream_backend; + proxy_timeout 3s; + proxy_connect_timeout 1s; + } + + server { + listen 53 udp; + proxy_pass dns_servers; + } + + server { + listen 12346; + proxy_pass backend4.example.com:12346; + } +} +``` + +In this example, all TCP and UDP proxy‑related functionality is configured inside the [`stream`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) block, just as settings for HTTP requests are configured in the [`http`](https://nginx.org/en/docs/http/ngx_http_core_module.html#http) block. + +There are two named [`upstream`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#upstream) blocks, each containing three servers that host the same content as one another. 
In the [`server`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#server) for each server, the server name is followed by the obligatory port number. Connections are distributed among the servers according to the [Least Connections](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#least_conn) load‑balancing method: a connection goes to the server with the fewest number of active connections. + +The three [`server`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#server) blocks define three virtual servers: + +- The first server listens on port 12345 and proxies all TCP connections to the **stream_backend** group of upstream servers. Note that the [`proxy_pass`](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_pass) directive defined in the context of the `stream` module must not contain a protocol. + + Two optional timeout parameters are specified: the [`proxy_connect_timeout`](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_connect_timeout) directive sets the timeout required for establishing a connection with a server in the **stream_backend** group. The [`proxy_timeout`](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_timeout) directive sets a timeout used after proxying to one of the servers in the **stream_backend** group has started. + +- The second server listens on port 53 and proxies all UDP datagrams (the `udp` parameter to the `listen` directive) to an upstream group called **dns_servers**. If the `udp` parameter is not specified, the socket listens for TCP connections. + +- The third virtual server listens on port 12346 and proxies TCP connections to **backend4.example.com**, which can resolve to several IP addresses that are load balanced with the Round Robin method. 
diff --git a/content/nginx/admin-guide/load-balancer/udp-health-check.md b/content/nginx/admin-guide/load-balancer/udp-health-check.md new file mode 100644 index 000000000..7b00048cf --- /dev/null +++ b/content/nginx/admin-guide/load-balancer/udp-health-check.md @@ -0,0 +1,180 @@ +--- +description: "This chapter describes how to configure different types of health checks\ + \ for UDP servers in a load\u2011balanced upstream server group." +docs: DOCS-421 +doctypes: +- task +title: UDP Health Checks +toc: true +weight: 500 +--- + + +## Prerequisites + +- You have configured an upstream group of servers that handles UDP network traffic (DNS, RADIUS, syslog) in the [`stream {}`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) context, for example: + + ```nginx + stream { + #... + upstream dns_upstream { + server 192.168.136.130:53; + server 192.168.136.131:53; + server 192.168.136.132:53; + } + #... + } + ``` + +- You have configured a server that passes UDP datagrams to the upstream server group: + + ```nginx + stream { + #... + server { + listen 53 udp; + proxy_pass dns_upstream; + proxy_timeout 1s; + proxy_responses 1; + error_log logs/dns.log; + } + #... + } + ``` + + See [TCP and UDP Load Balancing]({{< relref "../load-balancer/tcp-udp-load-balancer.md" >}}) for details. + + +## Passive UDP Health Checks + +NGINX Open Source or F5 NGINX Plus can mark the server as unavailable and stop sending UDP datagrams to it for some time if the server replies with an error or times out. + +The number of consecutive failed connection attempts within a certain time period is set with the [`max_fails`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#max_fails) parameter for an [`upstream server`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#server) (default value is `1`). 
+ +The time period is set with the [`fail_timeout`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#fail_timeout) parameter (default value is `10` seconds). The parameter also sets the amount of time that NGINX considers the server unavailable after marking it so. + +So if a connection attempt times out or fails at least once in a 10‑second period, NGINX marks the server as unavailable for 10 seconds. The example shows how to set these parameters to 2 failures within 60 seconds: + +```nginx +upstream dns_upstream { + server 192.168.136.130:53 fail_timeout=60s; + server 192.168.136.131:53 fail_timeout=60s; +} +``` + + +## Active UDP Health Checks + +Active Health Checks allow testing a wider range of failure types and are available only for NGINX Plus. For example, instead of waiting for an actual TCP request from a DNS client to fail before marking the DNS server as down (as in passive health checks), NGINX Plus will send special health check requests to each upstream server and check for a response that satisfies certain conditions. If a connection to the server cannot be established, the health check fails, and the server is considered unhealthy. NGINX Plus does not proxy client connections to unhealthy servers. If more than one health check is defined, the failure of any check is enough to consider the corresponding upstream server unhealthy. + +To enable active health checks: + +1. In the upstream group, specify a _shared memory zone_ with the [`zone`](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#zone) directive – a special area where the NGINX Plus worker processes share state information about counters and connections. In the `zone` directive, specify the zone _name_ (`dns_zone` in the example) and the zone _size_ (`64k` in the example): + + ```nginx + stream { + #... + upstream dns_upstream { + zone dns_zone 64k; + server 192.168.136.130:53; + server 192.168.136.131:53; + server 192.168.136.132:53; + } + #... 
+ } + ``` + +2. In the `server` block that forwards traffic to the upstream group (via [`proxy_pass`](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_pass)), specify the [`udp`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check_udp) parameter to the `health_check` directive: + + ```nginx + stream { + #... + server { + listen 53 udp; + proxy_pass dns_upstream; + health_check udp; + } + #... + } + ``` + +A basic UDP health check assumes that NGINX Plus sends the “nginx health check” string to an upstream server and expects the absence of ICMP “Destination Unreachable” message in response. You can configure your own health check tests in the `match {}` block. See [The “match {}” Configuration Block](#hc_active_match) for details. + + +### Fine-Tuning UDP Health Checks + +You can fine‑tune the health check by specifying the following parameters to the [`health_check`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check) directive: + +- `interval`– How often (in seconds) NGINX Plus sends health check requests (default is `5` seconds) +- `passes`– Number of consecutive health checks the server must respond to to be considered healthy (default is `1`) +- `fails`– Number of consecutive health checks the server must fail to respond to to be considered unhealthy (default is `1`) + +```nginx +server { + listen 53 udp; + proxy_pass dns_upstream; + health_check interval=20 passes=2 fails=2 udp; +} +``` + +In the example, the time between UDP health checks is increased to 20 seconds, the server is considered unhealthy after 2 consecutive failed health checks, and the server needs to pass 2 consecutive checks to be considered healthy again. + + +### The “match {}” Configuration Block + +You can verify server responses to health checks by configuring a number of tests. 
These tests are defined within the [`match {}`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#match) configuration block.
+
+1. In the top‑level `stream {}` context, specify the [`match {}`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#match) block and set its name, for example, `udp_test`:
+
+    ```nginx
+    stream {
+        #...
+        match udp_test {
+            #...
+        }
+    }
+    ```
+
+2. Refer to the block from the `health_check` directive by including the [`match`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check_match) parameter to specify the name of the `match {}` block:
+
+    ```nginx
+    stream {
+        #...
+        server {
+            listen 53 udp;
+            proxy_pass dns_upstream;
+            health_check match=udp_test udp;
+        }
+        #...
+    }
+    ```
+
+3. In the `match {}` block, specify conditions or tests under which a health check succeeds. This is done with `send` and `expect` parameters:
+    - [`send`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#match_send)– The text string or hexadecimal literals (“\x” followed by two hex digits) to send to the server
+    - [`expect`](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#match_expect)– Literal string or regular expression that the data returned by the server needs to match
+
+    These parameters can be used in different combinations, but no more than one `send` and one `expect` parameter can be specified at a time.
+ + +#### Example Test for NTP + +To fine‑tune health checks for NTP, you should specify both `send` and `expect` parameters with the following text strings: + +```nginx +match ntp { + send \xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00; + expect ~* \x24; +} +``` + + +#### Example Test for DNS + +To fine‑tune health checks for DNS, you should also specify both `send` and `expect` parameters with the following text strings: + +```nginx +match dns { + send \x00\x2a\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x03\x73\x74\x6c\x04\x75\x6d\x73\x6c\x03\x65\x64\x75\x00\x00\x01\x00\x01; + expect ~* "health.is.good"; +} +``` diff --git a/content/nginx/admin-guide/load-balancer/using-proxy-protocol.md b/content/nginx/admin-guide/load-balancer/using-proxy-protocol.md new file mode 100644 index 000000000..bee163e00 --- /dev/null +++ b/content/nginx/admin-guide/load-balancer/using-proxy-protocol.md @@ -0,0 +1,216 @@ +--- +description: null +docs: DOCS-422 +doctypes: +- task +title: Accepting the PROXY Protocol +toc: true +weight: 800 +--- + +This article explains how to configure NGINX and F5 NGINX Plus to accept the PROXY protocol, rewrite the IP address of a load balancer or proxy to the one received in the PROXY protocol header, configure simple logging of a client’s IP address, and enable the PROXY protocol between NGINX and a TCP upstream server. + + +## Introduction + +The [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) enables NGINX and NGINX Plus to receive client connection information passed through proxy servers and load balancers such as HAproxy and Amazon Elastic Load Balancer (ELB). + +With the PROXY protocol, NGINX can learn the originating IP address from HTTP, SSL, HTTP/2, SPDY, WebSocket, and TCP. 
Knowing the originating IP address of a client may be useful for setting a particular language for a website, keeping a denylist of IP addresses, or simply for logging and statistics purposes. + +The information passed via the PROXY protocol is the client IP address, the proxy server IP address, and both port numbers. + +Using this data, NGINX can get the originating IP address of the client in several ways: + +- With the [`$proxy_protocol_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_proxy_protocol_addr) and [`$proxy_protocol_port`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_proxy_protocol_port) variables which capture the original client IP address and port. The [`$remote_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_remote_addr) and [`$remote_port`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_remote_port) variables capture the IP address and port of the load balancer. + +- With the [RealIP](https://nginx.org/en/docs/http/ngx_http_realip_module.html) module which rewrites the values in the [`$remote_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_remote_addr) and [`$remote_port`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_remote_port) variables, replacing the IP address and port of the load balancer with the original client IP address and port. The [`$realip_remote_addr`](https://nginx.org/en/docs/http/ngx_http_realip_module.html#var_realip_remote_addr) and [`$realip_remote_port`](https://nginx.org/en/docs/http/ngx_http_realip_module.html#var_realip_remote_port) variables retain the address and port of the load balancer, and the [`$proxy_protocol_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_proxy_protocol_addr) and [`$proxy_protocol_port`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_proxy_protocol_port) variables retain the original client IP address and port anyway. 
+ + +## Prerequisites + +- To accept the PROXY protocol v2, NGINX Plus R16 and later or NGINX Open Source [1.13.11](https://nginx.org/en/CHANGES) and later + +- To accept the PROXY protocol for HTTP, NGINX Plus R3 and later or NGINX Open Source [1.5.12](https://nginx.org/en/CHANGES) and later + +- For TCP client‑side PROXY protocol support, NGINX Plus R7 and later or NGINX Open Source [1.9.3](https://nginx.org/en/CHANGES) and later + +- To accept the PROXY protocol for TCP, NGINX Plus R11 and later or NGINX Open Source [1.11.4](https://nginx.org/en/CHANGES) and later + +- The Real‑IP modules for [HTTP](https://nginx.org/en/docs/http/ngx_http_realip_module.html) and [Stream TCP](https://nginx.org/en/docs/stream/ngx_stream_realip_module.html) are not included in NGINX Open Source by default; see [Installing NGINX Open Source]({{< relref "../installing-nginx/installing-nginx-open-source.md" >}}) for details. No extra steps are required for NGINX Plus. + + + +## Configuring NGINX to Accept the PROXY Protocol + +To configure NGINX to accept PROXY protocol headers, add the `proxy_protocol` parameter to the `listen` directive in a `server` block in the [`http {}`](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen) or [`stream {}`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#listen) block. + +```nginx +http { + #... + server { + listen 80 proxy_protocol; + listen 443 ssl proxy_protocol; + #... + } +} + +stream { + #... + server { + listen 12345 proxy_protocol; + #... 
+ } +} +``` + +Now you can use the [`$proxy_protocol_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_proxy_protocol_addr) and [`$proxy_protocol_port`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_proxy_protocol_port) variables for the client IP address and port and additionally configure the [HTTP](https://nginx.org/en/docs/http/ngx_http_realip_module.html) and [`stream`](https://nginx.org/en/docs/stream/ngx_stream_realip_module.html) RealIP modules to replace the IP address of the load balancer in the [`$remote_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_remote_addr) and [`$remote_port`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_remote_port) variables with the IP address and port of the client. + + +## Changing the Load Balancer's IP Address To the Client IP Address + +You can replace the address of the load balancer or TCP proxy with the client IP address received from the PROXY protocol. This can be done with the [HTTP](https://nginx.org/en/docs/http/ngx_http_realip_module.html) and [`stream`](https://nginx.org/en/docs/stream/ngx_stream_realip_module.html) RealIP modules. With these modules, the [`$remote_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_remote_addr) and [`$remote_port`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_remote_port) variables retain the real IP address and port of the client, while the [`$realip_remote_addr`](https://nginx.org/en/docs/http/ngx_http_realip_module.html#var_realip_remote_addr) and [`$realip_remote_port`](https://nginx.org/en/docs/http/ngx_http_realip_module.html#var_realip_remote_port) variables retain the IP address and port of the load balancer. + +To change the IP address from the load balancer's IP address to the client's IP address: + +1. Make sure you've configured NGINX to accept the PROXY protocol headers. See [Configuring NGINX to Accept the PROXY Protocol](#listen). + +2. 
Make sure that your NGINX installation includes the [HTTP](https://nginx.org/en/docs/http/ngx_http_realip_module.html) and [Stream](https://nginx.org/en/docs/stream/ngx_stream_realip_module.html) Real‑IP modules: + + ```shell + nginx -V 2>&1 | grep -- 'http_realip_module' + nginx -V 2>&1 | grep -- 'stream_realip_module' + ``` + + If not, recompile NGINX with these modules. See [Installing NGINX Open Source]({{< relref "../installing-nginx/installing-nginx-open-source.md" >}}) for details. No extra steps are required for NGINX Plus. + +3. In the `set_real_ip_from` directive for [HTTP](https://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from), [Stream](https://nginx.org/en/docs/stream/ngx_stream_realip_module.html#set_real_ip_from), or both, specify the IP address or the CIDR range of addresses of the TCP proxy or load balancer: + + ```nginx + server { + #... + set_real_ip_from 192.168.1.0/24; + #... + } + ``` + +4. In the `http {}` context, change the IP address of the load balancer to the IP address of the client received from the PROXY protocol header, by specifying the `proxy_protocol` parameter to the [`real_ip_header`](https://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_header) directive: + + ```nginx + http { + server { + #... + real_ip_header proxy_protocol; + } + } + ``` + + +## Logging the Original IP Address + +When you know the original IP address of the client, you can configure the correct logging: + +1. For HTTP, configure NGINX to pass the client IP address to upstream servers using the [`$proxy_protocol_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_proxy_protocol_addr) variable with the [`proxy_set_header`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) directive: + + ```nginx + http { + proxy_set_header X-Real-IP $proxy_protocol_addr; + proxy_set_header X-Forwarded-For $proxy_protocol_addr; + } + ``` + +2. 
Add the [`$proxy_protocol_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_proxy_protocol_addr) variable to the `log_format` directive ([HTTP](https://nginx.org/en/docs/http/ngx_http_log_module.html#log_format) or [Stream](https://nginx.org/en/docs/stream/ngx_stream_log_module.html#log_format)): + + - In the `http` block: + + ```nginx + http { + #... + log_format combined '$proxy_protocol_addr - $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent"'; + } + ``` + + - In the `stream` block: + + ```nginx + stream { + #... + log_format basic '$proxy_protocol_addr - $remote_user [$time_local] ' + '$protocol $status $bytes_sent $bytes_received ' + '$session_time'; + } + ``` + + +## PROXY Protocol for a TCP Connection to an Upstream + +For a TCP stream, the PROXY protocol can be enabled for connections between NGINX and an upstream server. To enable the PROXY protocol, include the [`proxy_protocol`](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_protocol) directive in a `server` block at the `stream {}` level: + +```nginx +stream { + server { + listen 12345; + proxy_pass example.com:12345; + proxy_protocol on; + } +} +``` + + +## Example + +```nginx +http { + log_format combined '$proxy_protocol_addr - $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent"'; + #... 
+
+    server {
+        server_name localhost;
+
+        listen 80 proxy_protocol;
+        listen 443 ssl proxy_protocol;
+
+        ssl_certificate /etc/nginx/ssl/public.example.com.pem;
+        ssl_certificate_key /etc/nginx/ssl/public.example.com.key;
+
+        location /app/ {
+            proxy_pass http://backend1;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $proxy_protocol_addr;
+            proxy_set_header X-Forwarded-For $proxy_protocol_addr;
+        }
+    }
+}
+
+stream {
+    log_format basic '$proxy_protocol_addr - $remote_user [$time_local] '
+                     '$protocol $status $bytes_sent $bytes_received '
+                     '$session_time';
+    #...
+    server {
+        listen 12345 ssl proxy_protocol;
+
+        ssl_certificate /etc/nginx/ssl/cert.pem;
+        ssl_certificate_key /etc/nginx/ssl/cert.key;
+
+        proxy_pass backend.example.com:12345;
+        proxy_protocol on;
+    }
+}
+```
+
+The example assumes that there is a load balancer in front of NGINX to handle all incoming HTTPS traffic, for example Amazon ELB. NGINX accepts HTTPS traffic on port 443 (`listen 443 ssl;`), TCP traffic on port 12345, and accepts the client’s IP address passed from the load balancer via the PROXY protocol as well (the `proxy_protocol` parameter to the `listen` directive in both the `http {}` and `stream {}` blocks).
+
+NGINX terminates HTTPS traffic (the [`ssl_certificate`](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate) and [`ssl_certificate_key`](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate_key) directives) and proxies the decrypted data to a backend server:
+
+- For HTTP: `proxy_pass http://backend1;`
+- For TCP: `proxy_pass backend.example.com:12345`
+
+It includes the client IP address and port with the [`proxy_set_header`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) directives.
+ +The [`$proxy_protocol_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_proxy_protocol_addr) variable specified in the [`log_format`](https://nginx.org/en/docs/http/ngx_http_log_module.html#log_format) directive also passes the client’s IP address to the log for both HTTP and TCP. + +Additionally, a TCP server (the `stream {}` block) sends its own PROXY protocol data to its backend servers (the `proxy_protocol on` directive). diff --git a/content/nginx/admin-guide/mail-proxy/_index.md b/content/nginx/admin-guide/mail-proxy/_index.md new file mode 100644 index 000000000..38a6ab7ea --- /dev/null +++ b/content/nginx/admin-guide/mail-proxy/_index.md @@ -0,0 +1,9 @@ +--- +description: Documentation explaining how to configure NGINX and F5 NGINX Plus as a proxy + for various email protocols. +menu: + docs: + parent: NGINX Plus +title: Mail Proxy +weight: 1000 +--- diff --git a/content/nginx/admin-guide/mail-proxy/mail-proxy.md b/content/nginx/admin-guide/mail-proxy/mail-proxy.md new file mode 100644 index 000000000..42ecb4a03 --- /dev/null +++ b/content/nginx/admin-guide/mail-proxy/mail-proxy.md @@ -0,0 +1,252 @@ +--- +description: Simplify your email service and improve its performance with NGINX or + F5 NGINX Plus as a proxy for the IMAP, POP3, and SMTP protocols +docs: DOCS-423 +doctypes: +- task +title: Configuring NGINX as a Mail Proxy Server +toc: true +weight: 100 +--- + +This article will explain how to configure F5 NGINX Plus or NGINX Open Source as a proxy for a mail server or an external mail service. + +## Introduction + +NGINX can proxy IMAP, POP3 and SMTP protocols to one of the upstream mail servers that host mail accounts and thus can be used as a single endpoint for email clients. 
This may bring in a number of benefits, such as: + +- easily scaling the number of mail servers +- choosing a mail server based on different rules, for example, choosing the nearest server based on a client’s IP address +- distributing the load among mail servers + + +## Prerequisites + +- NGINX Plus (already includes the Mail modules necessary to proxy email traffic) or NGINX Open Source compiled with the Mail modules using the `--with-mail` parameter for email proxy functionality and `--with-mail_ssl_module` parameter for SSL/TLS support: + + ```shell + ./configure --with-mail --with-mail_ssl_module --with-openssl=[DIR]/openssl-1.1.1 + ``` + +- IMAP, POP3 and/or SMTP mail servers or an external mail service + + +## Configuring SMTP/IMAP/POP3 Mail Proxy Servers + +In the NGINX configuration file: + +1. Create a top-level [mail](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#mail) context (defined at the same level as the [http](https://nginx.org/en/docs/http/ngx_http_core_module.html#http) context): + + ```nginx + mail { + #... + } + ``` + +2. Specify the name for your mail server with the [server_name](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#server_name) directive: + + ```nginx + mail { + server_name mail.example.com; + #... + } + ``` + +3. Specify the HTTP authentication server with the [auth_http](https://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#auth_http) directive. The authentication server will authenticate email clients, choose an upstream server for email processing, and report errors. See [Setting up Authentication for a Mail Proxy](#mail_auth). + + ```nginx + mail { + server_name mail.example.com; + auth_http localhost:9000/cgi-bin/nginxauth.cgi; + #... + } + ``` + +4. Alternatively, specify whether to inform a user about errors from the authentication server by specifying the [proxy_pass_error_message](https://nginx.org/en/docs/mail/ngx_mail_proxy_module.html#proxy_pass_error_message) directive. 
This may be handy when a mailbox runs out of memory: + + ```nginx + mail { + server_name mail.example.com; + auth_http localhost:9000/cgi-bin/nginxauth.cgi; + + proxy_pass_error_message on; + #... + } + ``` + +5. Configure each SMTP, IMAP, or POP3 server with the [server](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#server) blocks. For each server, specify: + - the _port number_ that correspond to the specified protocol with the [listen](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#listen) directive + - the _protocol_ with the [protocol](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#protocol) directive (if not specified, will be automatically detected from the port specified in the [listen](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#listen) directive) + - permitted _authentication methods_ with [imap_auth](https://nginx.org/en/docs/mail/ngx_mail_imap_module.html#imap_auth), [pop3_auth](https://nginx.org/en/docs/mail/ngx_mail_pop3_module.html#pop3_auth), and [smtp_auth](https://nginx.org/en/docs/mail/ngx_mail_smtp_module.html#smtp_auth) directives: + + ```nginx + server { + listen 25; + protocol smtp; + smtp_auth login plain cram-md5; + } + + server { + listen 110; + protocol pop3; + pop3_auth plain apop cram-md5; + } + + server { + listen 143; + protocol imap; + } + ``` + + +## Setting up Authentication for a Mail Proxy + +Each POP3/IMAP/SMTP request from the client will be first authenticated on an external HTTP authentication server or by an authentication script. Having an authentication server is obligatory for NGINX mail server proxy. The server can be created by yourself in accordance with the [NGINX authentication protocol](https://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#protocol) which is based on the HTTP protocol. + +If authentication is successful, the authentication server will choose an upstream server and redirect the request. 
In this case, the response from the server will contain the following lines: + +```shell +HTTP/1.0 200 OK +Auth-Status: OK +Auth-Server: # the server name or IP address of the upstream server that will be used for mail processing +Auth-Port: # the port of the upstream server +``` + +If authentication fails, the authentication server will return an error message. In this case, the response from the server will contain the following lines: + +```shell +HTTP/1.0 200 OK +Auth-Status: # an error message to be returned to the client, for example “Invalid login or password” +Auth-Wait: # the number of remaining authentication attempts until the connection is closed +``` + +Note that in both cases the response will contain _HTTP/1.0 200 OK_ which might be confusing. + +For more examples of requests to and responses from the authentication server, see the [ngx_mail_auth_http_module](https://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#protocol) in [NGINX Reference documentation](https://nginx.org/en/docs/). + + +## Setting up SSL/TLS for a Mail Proxy + +Using POP3/SMTP/IMAP over SSL/TLS you make sure that data passed between a client and a mail server is secured. + +To enable SSL/TLS for the mail proxy: + +1. Make sure your NGINX is configured with SSL/TLS support by typing in the `nginx -V` command in the command line and then looking for the `--with-mail_ssl_module` line in the output: + + ```shell + $ nginx -V + configure arguments: ... --with-mail_ssl_module + ``` + +2. Make sure you have obtained server certificates and a private key and put them on the server. A certificate can be obtained from a trusted certificate authority (CA) or generated using an SSL library such as OpenSSL. +3. Enable SSL/TLS for mail proxy with the [ssl](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl) directive. 
If the directive is specified in the [mail](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#mail) context, SSL/TLS will be enabled for all mail proxy servers. You can also enable STLS and STARTTLS with the [starttls](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#starttls) directive: + + ```nginx + ssl on; + ``` + + or + + ```nginx + starttls on; + ``` + +4. Add SSL certificates: specify the path to the certificates (which must be in the PEM format) with the [ssl_certificate](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_certificate) directive, and specify the path to the private key in the [ssl_certificate_key](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_certificate_key) directive: + + ```nginx + mail { + #... + ssl_certificate /etc/ssl/certs/server.crt; + ssl_certificate_key /etc/ssl/certs/server.key; + } + ``` + +5. You can use only strong versions and ciphers of SSL/TLS with the [ssl_protocols](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_protocols) and [ssl_ciphers](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_ciphers) directives, or you can set your own preferable protocols and ciphers: + + ```nginx + mail { + #... + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers HIGH:!aNULL:!MD5; + } + ``` + +### Optimizing SSL/TLS for Mail Proxy + +These hints will help you make your NGINX mail proxy faster and more secure: + +1. Set the number of worker processes equal to the number of processors with the [worker_processes](https://nginx.org/en/docs/ngx_core_module.html#worker_processes) directive set on the same level as the [mail](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#mail) context: + + ```nginx + worker_processes auto; + mail { + #... + } + ``` + +2. 
Enable the shared session cache and disable the built-in session cache with the [ssl_session_cache](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_session_cache) directive: + + ```nginx + worker_processes auto; + + mail { + #... + ssl_session_cache shared:SSL:10m; + #... + } + ``` + +3. Optionally, you may increase the session lifetime which is `5` minutes by default with the [ssl_session_timeout](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_session_timeout) directive: + + ```nginx + worker_processes auto; + + mail { + #... + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + #... + } + ``` + +## Complete Example + +```nginx +worker_processes auto; + +mail { + server_name mail.example.com; + auth_http localhost:9000/cgi-bin/nginxauth.cgi; + + proxy_pass_error_message on; + + ssl on; + ssl_certificate /etc/ssl/certs/server.crt; + ssl_certificate_key /etc/ssl/certs/server.key; + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers HIGH:!aNULL:!MD5; + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + + server { + listen 25; + protocol smtp; + smtp_auth login plain cram-md5; + } + + server { + listen 110; + protocol pop3; + pop3_auth plain apop cram-md5; +} + + server { + listen 143; + protocol imap; + } +} +``` + +In this example, there are three email proxy servers: SMTP, POP3 and IMAP. Each of the servers is configured with SSL and STARTTLS support. SSL session parameters will be cached. + +The proxy server uses the HTTP authentication server – its configuration is beyond the scope of this article. All error messages from the server will be returned to clients. diff --git a/content/nginx/admin-guide/monitoring/_index.md b/content/nginx/admin-guide/monitoring/_index.md new file mode 100644 index 000000000..d1ed56fc3 --- /dev/null +++ b/content/nginx/admin-guide/monitoring/_index.md @@ -0,0 +1,9 @@ +--- +description: Documentation explaining how to monitor, generate logs for, and debug + NGINX and F5 NGINX Plus. 
+menu: + docs: + parent: NGINX Plus +title: Monitoring +weight: 700 +--- diff --git a/content/nginx/admin-guide/monitoring/debugging.md b/content/nginx/admin-guide/monitoring/debugging.md new file mode 100644 index 000000000..1d76d9a03 --- /dev/null +++ b/content/nginx/admin-guide/monitoring/debugging.md @@ -0,0 +1,405 @@ +--- +description: Troubleshoot problems and track down bugs in an NGINX or F5 NGINX Plus deployment, + with the debugging binary, debug logging, and core dumps. +docs: DOCS-424 +doctypes: +- task +title: Debugging NGINX +toc: true +weight: 300 +--- + +## Introduction + +Debugging helps to identify a bug in the program code if something goes wrong. It is generally used in developing or testing third-party or experimental modules. + +NGINX debugging features include the debugging log and creation of a core dump file with its further backtrace. + + +### Configuring NGINX Binary For Debugging + +First, you will need to enable debugging in NGINX binary. NGINX Plus already provides you with _nginx-debug_ binary while NGINX Open Source requires recompilation. + +### Configuring F5 NGINX Plus Binary + +Starting from [Release 8]({{< relref "../../releases.md" >}}), NGINX Plus ships the _nginx-debug_ binary together with the standard binary. To enable debugging in NGINX Plus, you will need to switch from _nginx_ to _nginx-debug_ binary. Open terminal and run the command: + +```shell +service nginx stop && service nginx-debug start +``` + +When finished, [enable](#error_log) the debugging log in the configuration file. + + +### Compiling NGINX Open Source Binary + +To enable debugging in NGINX Open Source, you will need to recompile it with the `--with-debug` flag specified in the configure script. + +To compile NGINX Open Source with the debug support: + +1. Download and unpack NGINX source files, go to the directory with the source files. See [Downloading the Sources]({{< relref "../installing-nginx/installing-nginx-open-source.md" >}}). +2. 
Get the list of NGINX configure arguments. Run the command: + + ```shell + nginx -V 2>&1 | grep arguments + ``` + +3. Add the `--with-debug` option to the list of configure commands and run the configure script: + + ```shell + ./configure --with-debug + ``` + +4. Compile and install NGINX: + + ```shell + sudo make + sudo make install + ``` + +5. Restart NGINX. + + +### NGINX and Debugging Symbols + +Debug symbols helps obtain additional information for debugging, such as functions, variables, data structures, source file and line number information. + +NGINX by default is compiled with the “_-g_” flag that includes debug symbols. + +However, if you get the “No symbol table info available” error when you run a [backtrace](#backtrace), then debugging symbols are missing and you will need to recompile NGINX with support of debugging symbols. + +The exact set of compiler flags depends on the compiler. For example, for the GCC compiler system: + +- include debugging symbols with the “_-g_” flag +- make the debugger output easier to understand by disabling compiler optimization with the “_-O0_” flag: + + ```shell + ./configure --with-debug --with-cc-opt='-O0 -g' ... + ``` + + +## Enabling Debug Logging in NGINX Configuration + +The debugging log records errors and any debugging-related information and is disabled by default. To enable it, make sure NGINX is compiled to support debugging (see [Configuring NGINX Binary For Debugging](#enable)) and then enable it in NGINX configuration file with the `debug` parameter of the [error_log](https://nginx.org/en/docs/ngx_core_module.html#error_log) directive. The debugging log may be written to a file, an allocated _buffer_ in memory, _stderr_ output, or to _syslog_. + +It is recommended enabling the debugging log on the ”_main_“ level of NGINX configuration to get the full picture of what’s going on. + + +### Writing the Debugging Log to a File + +Writing the debugging log to a file may slow down performance under high load. 
Also note that the file can grow very large and quickly eat up disk space. To reduce the negative impact, you can configure the debugging log to be written into a memory buffer, or set the debugging log for particular IP addresses. See [Writing the Debugging Log to Memory](#error_log_memory) and [Debug Log for Selected IPs](#error_log_ip) for details. + +To enable writing the debugging log to a file: + +1. Make sure your NGINX is configured with the `--with-debug` configuration option. Run the command and check if the output contains the `--with-debug` line: + + ```shell + nginx -V 2>&1 | grep -- '--with-debug' + ``` + +2. Open NGINX configuration file: + + ```shell + sudo vi /etc/nginx/nginx.conf + ``` + +3. Find the [error_log](https://nginx.org/en/docs/ngx_core_module.html#error_log) directive which is by default located in the `main` context, and change the logging level to `debug`. If necessary, change the path to the log file: + + ```shell + error_log /var/log/nginx/error.log debug; + ``` + +4. Save the configuration and exit the configuration file. + + +### Writing the Debugging Log to Memory + +The debugging log can be written to a memory using a cyclic buffer. The advantage is that logging on the debug level will not have significant impact on performance under high load. + +To enable writing the debug log to memory: + +1. Make sure your NGINX is configured with the `--with-debug` configuration option. Run the command and check if the output contains the `--with-debug` line: + + ```shell + nginx -V 2>&1 | grep -- '--with-debug' + ``` + +2. In NGINX configuration file, enable a memory buffer for debug logging with the [error_log](https://nginx.org/en/docs/ngx_core_module.html#error_log) directive specified in the `main` context: + + ```nginx + error_log memory:32m debug; + ... + http { + ... + } + ``` + +#### Extracting Debug Log From Memory + +The log can be extracted from the memory buffer using a script executed in the GDB debugger. 
+ +To extract the debugging log from memory: + +1. Obtain the PID of the NGINX worker process: + + ```shell + ps axu | grep nginx + ``` + +2. Launch the GDB debugger: + + ```shell + sudo gdb -p <nginx_worker_PID> + ``` + +3. Copy the script, paste it into GDB and press “Enter”. The script will save the log in the _debug_log.txt_ file located in the current directory: + + ```nginx + set $log = ngx_cycle->log + while $log->writer != ngx_log_memory_writer + set $log = $log->next + end + set $buf = (ngx_log_memory_buf_t *) $log->wdata + dump binary memory debug_log.txt $buf->start $buf->end + ``` + +4. Quit GDB by pressing CTRL+D. +5. Open the file “_debug_log.txt_” located in the current directory: + + ```shell + sudo less debug_log.txt + ``` + + +### Debug Log for Selected IPs + +It is possible to enable the debugging log for a particular IP address or a range of IP addresses. Logging particular IPs may be useful in a production environment as it will not negatively affect performance. The IP address is specified in the [debug_connection](https://nginx.org/en/docs/ngx_core_module.html#debug_connection) directive within the [events](https://nginx.org/en/docs/ngx_core_module.html#events) block; the directive can be defined more than once: + +```nginx +error_log /path/to/log; +... +events { + debug_connection 192.168.1.1; + debug_connection 192.168.10.0/24; +} +``` + + +### Debug Log for Each Virtual Host + +Generally, the [error_log](https://nginx.org/en/docs/ngx_core_module.html#error_log) directive is specified in the `main` context and thus is applied to all other contexts including [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server) and [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location). But if there is another `error_log` directive specified inside a particular `server` or a `location` block, the global settings will be overridden and such `error_log` directive will set its own path to the log file and the level of logging. 
+ +To set up the debugging log for a particular virtual host, add the `error_log` directive inside a particular `server` block, in which set a new path to the log file and the `debug` logging level: + +```nginx +error_log /path1/to/log debug; +... +http { + ... + server { + error_log /path2/to/log debug; + ... + } +} +``` + +To disable the debugging log per a particular virtual host, specify the `error_log` directive inside a particular `server` block, and specify a path to the log file only: + +```nginx +error_log /path/to/log debug; +... +http { + ... + server { + error_log /path/to/log; + ... + } +} +``` + + +## Enabling Core Dumps + +A core dump file can help identify and fix problems that are causing NGINX to crash. A core dump file may contain sensitive information such as passwords and private keys, so ensure that they are treated securely. + +In order to create a core dump file, they must be enabled in both the operating system and the NGINX configuration file. + + +### Enabling Core Dumps in the Operating System + +Perform the following steps in your operating system: + +1. Specify a working directory in which a core dump file will be saved, for example, “_/tmp/cores_”: + + ```shell + mkdir /tmp/cores + ``` + +2. Make sure the directory is writable by NGINX worker process: + + ```shell + sudo chown root:root /tmp/cores + sudo chmod 1777 /tmp/cores + ``` + +3. Disable the limit for the maximum size of a core dump file: + + ```shell + sudo prlimit --core=unlimited:unlimited --pid $(cat /run/nginx.pid) + ``` + + If the operation ends up with “Cannot modify limit: operation not permitted”, run the command: + + ```shell + sudo sh -c "ulimit -c unlimited && exec su $LOGNAME" + ``` + +4. Enable core dumps for the _setuid_ and _setgid_ processes. 
+ + For CentOS 7.0, Debian 8.2, Ubuntu 14.04, run the commands: + + ```shell + echo "/tmp/cores/core.%e.%p" | sudo tee /proc/sys/kernel/core_pattern + sudo sysctl -w fs.suid_dumpable=2 + sysctl -p + ``` + + For FreeBSD, run the commands: + + ```shell + sudo sysctl kern.sugid_coredump=1 + sudo sysctl kern.corefile=/tmp/cores/%N.core.%P + ``` + + +### Enabling Core Dumps in NGINX Configuration + +To enable core dumps in the NGINX configuration file: + +1. Open the NGINX configuration file: + + ```shell + sudo vi /usr/local/etc/nginx/nginx.conf + ``` + +2. Define a directory that will keep core dump files with the [working_directory](https://nginx.org/en/docs/ngx_core_module.html#working_directory) directive. The directive is specified on the _main_ configuration level: + + ```nginx + working_directory /tmp/cores/; + ``` + +3. Make sure the directory exists and is writable by NGINX worker process. Open terminal and run the commands: + + ```shell + sudo chown root:root /tmp/cores + sudo chmod 1777 /tmp/cores + ``` + +4. Specify the maximum possible size of the core dump file with the [worker_rlimit_core](https://nginx.org/en/docs/ngx_core_module.html#worker_rlimit_core) directive. The directive is also specified on the `main` configuration level. If the core dump file size exceeds the value, the core dump file will not be created. + + ```nginx + worker_rlimit_core 500M; + ``` + +Example: + +```nginx +worker_processes auto; +error_log /var/log/nginx/error.log debug; +working_directory /tmp/cores/; +worker_rlimit_core 500M; + +events { + ... +} + +http { + ... +} +``` + +With these settings, a core dump file will be created in the “_/tmp/cores/_” directory, and only if its size does not exceed 500 megabytes. + + +## Obtaining Backtrace From a Core Dump File + +Backtraces provide information from a core dump file about what was wrong when a program crashed. + +To get a backtrace from a core dump file: + +1. 
Open a core dump file with the GDB debugger using the pattern: + + ```shell + sudo gdb <nginx_executable_path> <core_dump_file_path> + ``` + +2. Type in the “_backtrace_” command to get a stack trace from the time of the crash: + + ```nginx + (gdb) backtrace + ``` + +If the “_backtrace_” command resulted in the “No symbol table info available” message, you will need to recompile the NGINX binary to include debugging symbols. See [NGINX and Debugging Symbols](#compile_symbols). + + +## Dumping NGINX Configuration From a Running Process + +You can extract the current NGINX configuration from the master process in memory. This can be useful when you need to: + +- verify which configuration has been loaded +- restore a previous configuration if the version on disk has been accidentally removed or overwritten + +The configuration dump can be obtained with a GDB script provided that your NGINX has the debug support. + +1. Make sure your NGINX is built with the debug support (the `--with-debug` configure option in the list of the configure arguments). Run the command and check if the output contains the `--with-debug` line: + + ```shell + nginx -V 2>&1 | grep -- '--with-debug' + ``` + +2. Obtain the PID of the NGINX worker process: + + ```shell + ps axu | grep nginx + ``` + +3. Launch the GDB debugger: + + ```shell + sudo gdb -p <nginx_PID> + ``` + +4. Copy and paste the script into GDB and press “Enter”. The script will save the configuration in the _nginx_conf.txt_ file in the current directory: + + ```nginx + set $cd = ngx_cycle->config_dump + set $nelts = $cd.nelts + set $elts = (ngx_conf_dump_t*)($cd.elts) + while ($nelts-- > 0) + set $name = $elts[$nelts]->name.data + printf "Dumping %s to nginx_conf.txt\n", $name + append memory nginx_conf.txt \ + $elts[$nelts]->buffer.start $elts[$nelts]->buffer.end + end + ``` + +5. Quit GDB by pressing _CTRL+D_. +6. 
Open the file _nginx_conf.txt_ located in the current directory: + + ```shell + sudo vi nginx.conf.txt + ``` + +## Asking for help + +When asking for help with debugging, please provide the following information: + +1. NGINX version, compiler version, and configure parameters. Run the command: + + ```shell + nginx -V + ``` + +2. Current full NGINX configuration. See [Dumping NGINX Configuration From a Running Process](#configdump) +3. The debugging log. See [Enabling Debug Logging in NGINX Configuration](#error_log) +4. The obtained backtrace. See [Enabling Core Dumps](#coredump), [Obtaining Backtrace](#backtrace) diff --git a/content/nginx/admin-guide/monitoring/diagnostic-package.md b/content/nginx/admin-guide/monitoring/diagnostic-package.md new file mode 100644 index 000000000..9fbc41a6c --- /dev/null +++ b/content/nginx/admin-guide/monitoring/diagnostic-package.md @@ -0,0 +1,114 @@ +--- +description: This page describes how to trigger the automatic collection of data required + to troubleshoot issues in a NGINX or F5 NGINX Plus deployment. +docs: DOCS-1357 +doctypes: +- task +title: NGINX Diagnostic Package +toc: true +weight: 400 +--- + + +## Overview + +NGINX Diagnostic Package is used to obtain additional information needed by [F5 Technical Support](https://account.f5.com/myf5) when troubleshooting your issue. + +The package is created by a script that can be [downloaded](https://nginx.org/download/nginx-supportpkg.sh) from the [nginx.org](https://nginx.org/download/) website. 
+ +The script collects the following information: + +- host commands such as `ps`, `lsof`, `vmstat` +- NGINX configuration files +- NGINX log files +- NGINX service information +- NGINX process information +- NGINX versions, dynamically linked libraries +- NGINX Plus API endpoints +- NGINX Agent logs and configs if NGINX Agent is present +- NGINX App Protect logs and configs if F5 NGINX App Protect is present + +The script does not collect or create: + +- njs scripts +- Lua scripts +- Core dumps + +{{< note >}} +It is highly recommended that you review the script and the created resources and verify that they conform with your organization's data sharing policies. +{{< /note >}} + + + +## Supported Operating Systems + +The script can be run on most [operating systems supported by NGINX](https://docs.nginx.com/nginx/technical-specs/) and has been tested on the following operating systems: + +- AlmaLinux 9.1 +- Amazon Linux 2 +- CentOS 7 +- Debian 11 +- RHEL 9.1 +- Rocky Linux 9.1 +- SUSE Linux Enterprise Server 15 +- Ubuntu 20.04 + + + +## Usage + +To create NGINX Diagnostic Package: + +1. [Download](https://nginx.org/download/nginx-supportpkg.sh) the `nginx-supportpkg.sh` script: + + ```shell + wget https://nginx.org/download/nginx-supportpkg.sh + ``` + +2. Grant execution permissions to the script: + + ```shell + chmod +x nginx-supportpkg.sh + ``` + +3. Run the script. The script requires root privileges to run. The script can be run with optional arguments, see [Arguments](#arguments) for details. + + ```shell + sudo ./nginx-supportpkg.sh + ``` + + The created package will be located in the same directory as the current script. It is a `.tar.gz` archive named according to the file name pattern: `support-pkg-.tar.gz`. + +4. After the package has been created, it is recommended to extract and review its contents. 
Use the `tar` command to extract the archive: + + ```shell + tar -xvf support-pkg-1682457903.tar.gz + ``` + + The archive contains textual output of all the commands run by the script to make it easier to review the collected data. + + + +## Arguments + +The following table lists the arguments you can use to customize the data that needs to be collected based on your NGINX deployment. + +{{}} + +| Short | Long | Description | Example | Default | +| ----- | ------------------------ | ----------------------------------------------------------------------| -------------------------| -----------------| +| `-h` | `--help` | Prints information about the script arguments to stdout. | `--help` | N/A | +| `-d` | `--debug` | Sets bash debug flag. | `--debug` | N/A | +| `-o` | `--output_dir` | The output directory where the tar archive is saved. | `-o ~/output` | `$(pwd)` | +| `-n` | `--nginx_log_path` | The directory where the NGINX log files are located. | `-n /var/log/nginx` | `/var/log/nginx` | +| `-xc` | `--exclude_nginx_configs`| Excludes all NGINX configuration files from the support package. | `--exclude_nginx_configs`| N/A | +| `-xl` | `--exclude_nginx_logs` | Excludes all NGINX log files from the support package. | `--exclude_nginx_logs` | N/A | +| `-ac` | `--exclude_agent_configs`| Excludes all NGINX Agent configuration files from the support package.| `--exclude_agent_configs`| N/A | +| `-al` | `--exclude_agent_logs` | Excludes all NGINX Agent logs from the support package. | `--exclude_agent_logs` | N/A | +| `-nc` | `--exclude_nap_configs` | Excludes all NGINX App Protect config files from the support package. | `--exclude_nap_configs` | N/A | +| `-nl` | `--exclude_nap_logs` | Excludes all NGINX App Protect log files from the support package. | `--exclude_nap_logs` | N/A | +| `-ea` | `--exclude_api_stats` | Excludes NGINX Plus API stats from the support package. | `--exclude_api_stats` | N/A | +| `-pi` | `--profile_interval` | Profiling interval in seconds. 
| `-pi 20` | 15 | + +{{}} + diff --git a/content/nginx/admin-guide/monitoring/live-activity-monitoring.md b/content/nginx/admin-guide/monitoring/live-activity-monitoring.md new file mode 100644 index 000000000..77d637439 --- /dev/null +++ b/content/nginx/admin-guide/monitoring/live-activity-monitoring.md @@ -0,0 +1,630 @@ +--- +description: Track the performance of F5 NGINX Plus and your apps in real time, on the + built-in live activity monitoring dashboard or by feeding the JSON to other tools. +docs: DOCS-425 +doctypes: +- task +title: Live Activity Monitoring +toc: true +weight: 100 +--- + + + +This article describes how to configure and use runtime monitoring services in NGINX Plus: the interactive Dashboard and NGINX Plus REST API. + + +## About Live Activity Monitoring + +NGINX Plus provides various monitoring tools for your server infrastructure: + +- the interactive Dashboard page available since NGINX Plus Release 9 - a real-time live activity monitoring interface that shows key load and performance metrics of your server infrastructure. + +- NGINX REST API available since NGINX Plus Release 14 - an interface that can obtain extended status information, reset statistics, manage upstream servers on-the-fly, and manage key-value store. With the API you can connect NGINX Plus status information with third-party tools that support the JSON interface, for example, NewRelic or your own dashboard. + + > **Note**: Prior to NGINX Plus R14, gathering statistics and management of upstream servers in the Dashboard was performed with the [status](https://nginx.org/en/docs/http/ngx_http_status_module.html#status) and [upstream_conf](https://nginx.org/en/docs/http/ngx_http_upstream_conf_module.html) modules. 
Now the extended [status](https://nginx.org/en/docs/http/ngx_http_status_module.html#status) and [upstream_conf](https://nginx.org/en/docs/http/ngx_http_upstream_conf_module.html) modules are superseded by the [api](https://nginx.org/en/docs/http/ngx_http_api_module.html) module. Starting from R16, the [status](https://nginx.org/en/docs/http/ngx_http_status_module.html#status) and [upstream_conf](https://nginx.org/en/docs/http/ngx_http_upstream_conf_module.html) modules will be removed and completely superseded with the [api](https://nginx.org/en/docs/http/ngx_http_api_module.html) module. + +* * * + +[![live activity monitoring](/nginx/images/nginx-plus-dashboard-r30-overview-2.png)](https://demo.nginx.com/dashboard.html "Live status metrics from NGINX Plus") + +* * * + + +## Prerequisites + +- NGINX Plus R14 and later for NGINX Plus REST API and the Dashboard +- Data for statistics (see [Gathering Data to Appear in Statistics](#status_data)) + + +## Gathering Data to Appear in Statistics + +In order to collect data from virtual servers, upstream server groups, or cache zones, you will need to *enable shared memory zones* for the objects you want to collect data for. A shared memory zone stores configuration and runtime state information referenced by NGINX worker processes. + +- To make [HTTP]({{< relref "../load-balancer/http-load-balancer.md" >}}) and [TCP]({{< relref "../load-balancer/tcp-udp-load-balancer.md" >}}) server to appear in statistics, specify the [`status_zone`](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone) directive. The same zone name can be specified more than once for many [`server`](https://nginx.org/en/docs/http/ngx_http_core_module.html#server) blocks. 
Since R19, the [status_zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone) directive can also be specified for [`location`](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) blocks - in this case, the statistics will be aggregated separately for servers and locations in the Dashboard: + + ```nginx + server { + # ... + status_zone status_page; + location / { + proxy_pass http://backend; + status_zone location_zone; + } + } + ``` + +- To make an upstream server group appear in statistics, specify the [`zone`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) directive for each [`upstream`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) block: + + ```nginx + upstream backend { + zone backend 64k; + server backend1.example.com; + server backend2.example.com; + } + ``` + +- To make cache appear in statistics, make sure that caching is enabled in your configuration. A shared memory zone for caching is specified in the [`proxy_cache_path`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path), [`fastcgi_cache_path`](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_path), [`scgi_cache_path`](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_path), or [`uwsgi_cache_path`](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_path) directive in the `keys_zone` parameter. See [NGINX Content Caching]({{< relref "../content-cache/content-caching.md" >}}) for more information: + + ```nginx + http { + # ... + proxy_cache_path /data/nginx/cache keys_zone=one:10m; + } + ``` + +- To make health checks appear in statistics, make sure that health checks are enabled with the [`health_check`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html) directive and the server group resides in the [shared memory](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone). 
See [HTTP Health Checks]({{< relref "../load-balancer/http-health-check.md" >}}) and [TCP Health Checks]({{< relref "/nginx/admin-guide/load-balancer/tcp-health-check.md" >}}) for more information. + + ```nginx + server { + # ... + status_zone status_page; + location / { + proxy_pass http://backend; + health_check; + } + } + ``` + +- To make cluster information appear in the Dashboard, make sure that F5 NGINX Plus instances are organized in the cluster and zone synchronization is enabled on each instance. See [Runtime State Sharing in a Cluster](https://docs.nginx.com/nginx/admin-guide/high-availability/zone_sync/) for details. + +- To make resolver statistics appear in the Dashboard, specify the [`status_zone`](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver_status_zone) parameter of the [`resolver`](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver) directive: + + ```nginx + resolver 192.168.33.70 status_zone=resolver-zone1; + + server { + # ... + } + ``` + +- When finished, save and exit configuration file. +- Test the configuration and reload NGINX Plus: + + ```shell + sudo nginx -t && sudo nginx -s reload + ``` + + +## Configuring the API + +To enable the API: + +- In the [`http`](https://nginx.org/en/docs/http/ngx_http_core_module.html#http) context, specify a [`server`](https://nginx.org/en/docs/http/ngx_http_core_module.html#server) block that will be responsible for the API: + + ```nginx + http { + server { + # your api configuration will be here + } + } + ``` + +- Create a [`location`](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) for API requests and specify the [`api`](https://nginx.org/en/docs/http/ngx_http_api_module.html#api) directive in this location: + + ```nginx + http { + # ... + server { + listen 192.168.1.23; + # ... + location /api { + api; + # ... 
+ } + } + } + ``` + +- In order to make changes with the API, such as [resetting statistics counters](#json_delete), managing [upstream servers on-the-fly]({{< relref "../load-balancer/dynamic-configuration-api.md" >}}) or [key-value storage]({{< relref "/nginx/admin-guide/security-controls/denylisting-ip-addresses.md" >}}), managing upstream servers from the [Dashboard](#dashboard_upstream), enable the read-write mode for the API by specifying the `write=on` parameter for the [`api`](https://nginx.org/en/docs/http/ngx_http_api_module.html#api) directive: + + ```nginx + http { + # ... + server { + listen 192.168.1.23; + # ... + location /api { + api write=on; + # ... + } + } + } + ``` + +- It is recommended restricting access to the API location, for example, allow access only from local networks with [`allow`](https://nginx.org/en/docs/http/ngx_http_access_module.html#allow) and [`deny`](https://nginx.org/en/docs/http/ngx_http_access_module.html#deny) directives: + + ```nginx + http { + # ... + server { + listen 192.168.1.23; + # ... + location /api { + api write=on; + allow 192.168.1.0/24; + deny all; + } + } + } + ``` + +- It is also recommended restricting access to `PATCH`, `POST`, and `DELETE` methods to particular users. This can be done by implementing [`HTTP basic authentication`](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html): + + + ```nginx + http { + # ... + server { + listen 192.168.1.23; + # ... + location /api { + limit_except GET { + auth_basic "NGINX Plus API"; + auth_basic_user_file /path/to/passwd/file; + } + api write=on; + allow 192.168.1.0/24; + deny all; + } + } + } + ``` + +- Enable the [Dashboard](#dashboard) by specifying the `/dashboard.html` location. By default the Dashboard is located in the root directory (for example, `/usr/share/nginx/html`) specified by the [`root`](https://nginx.org/en/docs/http/ngx_http_core_module.html#root) directive: + + ```nginx + http { + # ... + server { + listen 192.168.1.23; + # ... 
+ location /api { + limit_except GET { + auth_basic "NGINX Plus API"; + auth_basic_user_file /path/to/passwd/file; + } + api write=on; + allow 192.168.1.0/24; + deny all; + } + location = /dashboard.html { + root /usr/share/nginx/html; + } + } + } + ``` + +- As an option you can try the [Swagger UI](#swagger_enable) - an interactive documentation tool for the API specification supplied in a OpenAPI YAML file and used with NGINX Plus. +Download the Swagger UI and the OpenAPI YAML specification, specify a [`location`](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) for them, for example, */swagger-ui*, the path to its files with the [`root`](https://nginx.org/en/docs/http/ngx_http_core_module.html#root) directive, for example, */usr/share/nginx/html* and limit access to local networks with [`allow`](https://nginx.org/en/docs/http/ngx_http_access_module.html#allow) and [`deny`](https://nginx.org/en/docs/http/ngx_http_access_module.html#deny) directives. See [The Swagger UI](#the-swagger-ui) section for details. + + ```nginx + http { + # ... + + server { + listen 192.168.1.23; + # ... + + location /api { + limit_except GET { + auth_basic "NGINX Plus API"; + auth_basic_user_file /path/to/passwd/file; + } + + api write=on; + allow 192.168.1.0/24; + deny all; + } + + location = /dashboard.html { + root /usr/share/nginx/html; + } + + location /swagger-ui { + add_header Content-Security-Policy "default-src 'self'"; + root /usr/share/nginx/html; + allow 192.168.1.0/24; + deny all; + } + } + } + ``` + + +## Using the Dashboard + +NGINX Plus Dashboard provides a real-time live activity monitoring interface that shows key load and performance metrics of your server infrastructure. + + +### Accessing the Dashboard + +In the address bar of your browser, type-in the address that corresponds to your Dashboard page (in our example `http://192.168.1.23/dashboard.html`). 
This will display the Dashboard page located at `/usr/share/nginx/html` as specified in the `root` directive. + +There is also a live demo page from NGINX available at [demo.nginx.com/dashboard.html](https://demo.nginx.com/dashboard.html): + +[![live activity monitor](/nginx/images/nginx-plus-dashboard-r30-overview-2.png)](https://demo.nginx.com/dashboard.html "Live load-balancing status from NGINX Plus") + + +### Tabs Overview + +All information in NGINX Plus Dashboard is represented in tabs. + +![The row of tabs at the top of the window on the NGINX Plus dashboard make it easy to drill down to more detailed information about server zones, upstream groups, or the cache](/nginx/images/dashboard-tabs.png) + +The **HTTP Zones** tab gives detailed statistics on the frontend performance. Statistics are shown per each [`server`](https://nginx.org/en/docs/http/ngx_http_core_module.html#server), [`location`](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) and [`limit_req`](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html) zones in the [`http`](https://nginx.org/en/docs/http/ngx_http_core_module.html#http) context. For NGINX Plus to collect information for each server, you must include the [`status_zone`](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone) directive in each `server` or `location` block. To include charts for `limit_req` limiting, you must configure the [`limit_req_zone`](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone) directive. + +![The 'HTTP zones' tab in the NGINX Plus live activity monitoring dashboard displays information about NGINX Plus' interaction with clients](/nginx/images/dashboard-tab-http-zones.png) + +TCP and UDP status zones with charts for connection limiting ([`limit_conn`](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html)) appear on the **TCP/UDP Zones** tab. 
+ +![The 'TCP/UDP zones' tab in the NGINX Plus live activity monitoring dashboard](/nginx/images/dashboard-tab-tcp-zones.png) + +The **HTTP Upstreams** tab provides information about each upstream group for HTTP and HTTPS traffic. TCP and UDP upstream groups appear on the **TCP/UDP Upstreams** tab. For NGINX Plus to collect information for an upstream group, you must include the [`zone`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) directive in the [`upstream`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) configuration block. + +![The 'Upstreams' tab on the NGINX Plus live activity monitoring dashboard provides information about the servers in each upstream group for HTTP/HTTPS traffic](/nginx/images/dashboard-tab-http-upstreams.png) + +The **Caches** tab provides statistics about the caches configured in NGINX Plus. For NGINX Plus to collect information for an upstream group, you must [configure cache]({{< relref "../content-cache/content-caching.md" >}}). + +![The 'Caches' tab in the NGINX Plus live activity monitoring dashboard provides information about cache readiness, fullness, and hit ratio](/nginx/images/dashboard-tab-caches.png) + +The **Shared Zones** tab shows how much memory is used by each shared memory zone, including cache zones, SSL session cache, upstream zones, keyval zones, session log, sticky sessions, limit_conn and limit_req zones. + +![The 'Shared Zones' tab in the NGINX Plus live activity monitoring dashboard provides information about memory usage across all shared memory zones](/nginx/images/dashboard-tab-shared-zones.png) + +The **Cluster** tab provides the synchronization status of shared memory zones across all NGINX cluster nodes. See [Runtime State Sharing in a Cluster](https://docs.nginx.com/nginx/admin-guide/high-availability/zone_sync/) for details on how to organize NGINX instances in a cluster and configure synchronization between all cluster nodes. 
+ +![The 'Cluster' tab in the NGINX Plus live activity monitoring dashboard provides synchronization information of shared memory zones of NGINX cluster nodes](/nginx/images/dashboard-tab-cluster.png) + +The **Resolvers** tab provides DNS server statistics of requests and responses per each DNS status zone. For NGINX Plus to collect information about your DNS servers, include the [`status_zone`](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver_status_zone) parameter in the [`resolver`](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver) directive. + +![The 'Resolvers' tab in the NGINX Plus live activity monitoring dashboard provides information about cache readiness, fullness, and hit ratio](/nginx/images/dashboard-tab-resolvers.png) + +The **Workers** tab provides information about worker processes and shows per-worker connection statistics. + +![The 'Workers' tab in the NGINX Plus live activity monitoring dashboard provides information about worker processes](/nginx/images/dashboard-tab-workers.png) + + + +### Managing Upstream Servers from the Dashboard + +You can add new or modify and remove upstream servers directly from the Dashboard interface. Note that you must previously enable the [`api`](https://nginx.org/en/docs/http/ngx_http_api_module.html#api) in the write mode. 
+ +In the **Upstreams** or **TCP/UDP Upstreams** tab, click the pencil icon next to the server name and choose between **Edit selected** and **Add server** buttons: + +![In editing mode on the 'Upstreams' tab in the NGINX Plus live activity monitoring dashboard, you can add, remove, or modify servers](https://cdn.wp.nginx.com/wp-content/uploads/2015/09/Screen-Shot-2015-09-08-at-10.31.14-AM.png) + +To add an upstream server, click **Add server**: + +![The 'Add server' interface for adding servers to an upstream group in the NGINX Plus live activity monitoring dashboard](https://cdn.wp.nginx.com/wp-content/uploads/2015/09/Screen-Shot-2015-09-08-at-10.47.22-AM.png) + +To remove or modify an upstream server, click the box to the left of each server’s name, then click **Edit selected**: + +![The 'Edit selected' interface for modifying or removing servers in an upstream group in the NGINX Plus live activity monitoring dashboard](/nginx/images/dashboard-r7-edit-server-interface.png) + +When finished, click the **Save** button to save the changes. + + +### Configuring Dashboard Options +You can configure the threshold for Dashboard warnings and alerts by clicking the Gear button in the Tabs menu: + +![The 'Dashboard configuration' interface for modifying Dashboard settings](/nginx/images/dashboard-options.png) + +**Update every N sec** - updates the Dashboard data after the specified number of seconds, default is `1` second. + +**4xx warnings threshold** - represents the ratio between the numbers of `Total` requests and `4xx` errors for `HTTP Upstreams` and `HTTP Zones`. Default is `30%`. + +**Calculate hit ratio for the past N sec** - represents all cache hits within the specified number of seconds for `Caches`. Default is `300` seconds. + +**Not synced data threshold** - represents the ratio between `Pending` records and `Total` records for `Clusters`. Default is `70%`. 
+ +**Resolver errors threshold** - represents the ratio between `Requests` and resolver errors within the time frame specified in **Update every N sec** for `Resolvers`. Default is `3%`. + + + +## Using the REST API + +With NGINX Plus, statistics of your server infrastructure can be managed with the REST API interface. The API is based on standard HTTP requests: statistics can be obtained with `GET` requests and reset with `DELETE` requests. Upstream servers can be added with `POST` requests and modified with `PATCH` requests. See [Managing Upstream Servers with the API]({{< relref "../load-balancer/dynamic-configuration-api.md" >}}) for more information. + +The requests are sent in the JSON format that allows you to connect the stats to monitoring tools or dashboards that support JSON. + + +### Getting statistics with the API + +The status information of any element can be accessed with a slash-separated URL. The URL may look as follows: + +[`https://demo.nginx.com/api/9/http/caches/http_cache?fields=expired,bypass`](https://demo.nginx.com/api/9/http/caches/http_cache?fields=expired,bypass) + +where: + +- `/api` is the location you have configured in the NGINX configuration file for the API +- `/9` is the API version, the current API version is `9` +- `/http/caches/http_cache` is the path to the resource +- `?fields=expired,bypass` is an optional argument that specifies which fields of the requested object will be output + +The requested information is returned in the JSON data format. 
+
+To get the list of all available rootpoints, send the `GET` request with the 'curl' command in terminal (in the example, JSON pretty print extension "json_pp" is used):
+
+```shell
+curl -s 'https://demo.nginx.com/api/9/' | json_pp
+```
+
+The JSON data returned:
+
+```json
+[
+    "nginx",
+    "processes",
+    "connections",
+    "slabs",
+    "http",
+    "stream",
+    "resolvers",
+    "ssl",
+    "workers"
+]
+```
+
+To get the statistics for a particular endpoint, for example, obtain general information about NGINX, send the following `GET` request:
+
+```shell
+curl -s 'https://demo.nginx.com/api/9/nginx' | json_pp
+```
+
+The JSON data returned:
+
+```json
+{
+    "version" : "1.25.1",
+    "build" : "nginx-plus-r30",
+    "address" : "206.251.255.64",
+    "generation" : 14,
+    "load_timestamp" : "2023-08-15T10:00:00.114Z",
+    "timestamp" : "2023-08-15T14:06:36.475Z",
+    "pid" : 2201,
+    "ppid" : 92033
+}
+```
+
+You can specify which fields of the requested object will be output with the optional *fields* argument in the request line. For example, to display only NGINX Plus version and build, specify the command:
+
+```shell
+curl -s 'https://demo.nginx.com/api/9/nginx?fields=version,build' | json_pp
+```
+
+The JSON data returned:
+
+```json
+{
+    "version" : "1.25.1",
+    "build" : "nginx-plus-r30"
+}
+```
+
+For a complete list of available endpoints and supported methods see [reference documentation](https://nginx.org/en/docs/http/ngx_http_api_module.html).
+
+
+### Resetting the statistics
+
+Resetting the statistics is performed by sending an API command with the `DELETE` method to a target endpoint. Make sure that your API is configured in the read-write mode.
+ +You can reset the following types of statistics: + +- abnormally terminated and respawned child processes +- accepted and dropped client connections +- SSL handshakes and session reuses +- the `reqs` and `fails` metrics for each memory slot +- total client HTTP requests +- accepted and discarded requests, responses, received and sent bytes in a particular HTTP server zone +- cache hits and cache misses in a particular cache zone +- statistics for a particular HTTP or stream upstream server in an upstream server group + +For example, to reset the number of abnormally terminated and respawned child processes, you can perform the following command in the terminal via curl: + +```shell +curl -X DELETE -i 'http://192.168.1.23/api/9/processes' +``` + +To reset accepted and dropped client connections perform the following command: + +```shell +curl -X DELETE -i 'http://192.168.1.23/api/9/connections' +``` + + +### Managing Upstream Servers with the API + +The NGINX Plus REST API supports `POST` and `PATCH` HTTP methods to dynamically add a server to the upstream group or modify server parameters. + +To dynamically change the configuration of an upstream group, send an HTTP request with the appropriate API method. The following examples use the `curl` command, but any mechanism for making HTTP requests is supported. All request bodies and responses are in JSON format. 
+
+The URI specifies the following information in this order:
+
+- The hostname or IP address of the node that handles the request (in the following examples, **192.168.1.23**)
+- The location where the `api` directive appears (**api**)
+- The API version (**9**)
+- The name of the upstream group, complete with its place in the NGINX Plus configuration hierarchy represented as a slash-separated path (**http/upstreams/appservers**)
+
+For example, to add a new server to the **appservers** upstream group, send the following `curl` command:
+
+```shell
+curl -X POST -d '{
+    "server": "10.0.0.1:8089",
+    "weight": 4,
+    "max_conns": 0,
+    "max_fails": 0,
+    "fail_timeout": "10s",
+    "slow_start": "10s",
+    "backup": true,
+    "down": true
+}' -s 'http://192.168.1.23/api/9/http/upstreams/appservers/servers'
+```
+
+To remove a server from the upstream group:
+
+```shell
+curl -X DELETE -s 'http://192.168.1.23/api/9/http/upstreams/appservers/servers/0'
+```
+
+To set the `down` parameter for the first server in the group (with ID `0`):
+
+```shell
+curl -X PATCH -d '{ "down": true }' -s 'http://192.168.1.23/api/9/http/upstreams/appservers/servers/0'
+```
+
+
+## OpenAPI Specification
+
+NGINX Plus allows you to explore the REST API documentation and send API commands with a graphical user interface. This can be done with the NGINX Plus OpenAPI specification in YAML format and the Swagger UI.
+
+The main purpose of Swagger UI and the YAML OpenAPI spec is to document and visualize NGINX API commands. For security reasons it is not recommended using it in a production environment.
+
+Prior to [NGINX Plus Release 25](https://docs.nginx.com/nginx/releases/#nginxplusrelease-25-r25), the Swagger UI was shipped together with NGINX Plus packages. Since [NGINX Plus Release 26](https://docs.nginx.com/nginx/releases/#nginxplusrelease-26-r26), the OpenAPI YAML specification and the Swagger UI is published separately, below.
+ +Alternatively, copy the link to the appropriate YAML file, and import into your preferred OpenAPI v2 tool. + + + +### Enabling the Swagger UI + + +To enable the Swagger UI: + +1. Install and configure the Swagger UI. The installation package and instructions can be found on the [Swagger UI page](https://swagger.io/tools/swagger-ui/download/). + +2. Choose the version of the OpenAPI YAML file that matches your version of NGINX Plus, download the file, and put it to the folder containing the Swagger UI files: + +{{}} + +|OpenAPI YAML File/API Version | NGINX Plus Version | Changes | +| ---| --- | --- | +|[{{}}OpenAPI v2](../../yaml/v9/nginx_api.yaml) for API version 9 | NGINX Plus Releases [30](https://docs.nginx.com/nginx/releases/#nginxplusrelease-30-r30) | The [`/workers/`](https://nginx.org/en/docs/http/ngx_http_api_module.html#workers_) data were added| +|[{{}}OpenAPI v2](../../yaml/v8/nginx_api.yaml) for API version 8 | NGINX Plus Releases [27](https://docs.nginx.com/nginx/releases/#nginxplusrelease-27-r27), [28](https://docs.nginx.com/nginx/releases/#nginxplusrelease-28-r28), [29](https://docs.nginx.com/nginx/releases/#nginxplusrelease-29-r29) | SSL statistics for each HTTP [upstream](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_upstream) and stream [upstream](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_stream_upstream), SSL statistics for each HTTP [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_server_zone) and stream [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_stream_server_zone), extended statistics for [SSL](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_ssl_object) endpoint| +|[{{}}OpenAPI v2](../../yaml/v7/nginx_api.yaml) for API version 7 | NGINX Plus Releases [25](https://docs.nginx.com/nginx/releases/#nginxplusrelease-25-r25), [26](https://docs.nginx.com/nginx/releases/#nginxplusrelease-26-r26)| The `codes` 
data in `responses` for each HTTP [upstream](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_upstream), [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_server_zone), and [location zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_location_zone) were added| +|[{{}}OpenAPI v2](../../yaml/v6/nginx_api.yaml) for API version 6 | NGINX Plus Releases [20](https://docs.nginx.com/nginx/releases/#nginxplusrelease-20-r20), [21](https://docs.nginx.com/nginx/releases/#nginxplusrelease-21-r21), [22](https://docs.nginx.com/nginx/releases/#nginxplusrelease-22-r22), [23](https://docs.nginx.com/nginx/releases/#nginxplusrelease-23-r23), [24](https://docs.nginx.com/nginx/releases/#nginxplusrelease-24-r24) | The [`/stream/limit_conns/`](https://nginx.org/en/docs/http/ngx_http_api_module.html#stream_limit_conns_), [`/http/limit_conns/`](https://nginx.org/en/docs/http/ngx_http_api_module.html#http_limit_conns_), and [`/http/limit_reqs/`](https://nginx.org/en/docs/http/ngx_http_api_module.html#http_limit_reqs_) data were added | +|[{{}}OpenAPI v2](../../yaml/v5/nginx_api.yaml) for API version 5 | [NGINX Plus Release 19](https://docs.nginx.com/nginx/releases/#nginxplusrelease-19-r19) | The `expire` parameter of a [key-value](https://nginx.org/en/docs/http/ngx_http_keyval_module.html) pair can be [set](https://nginx.org/en/docs/http/ngx_http_api_module.html#postHttpKeyvalZoneData) or [changed](https://nginx.org/en/docs/http/ngx_http_api_module.html#patchHttpKeyvalZoneKeyValue), the [`/resolvers/`](https://nginx.org/en/docs/http/ngx_http_api_module.html#resolvers_) and [`/http/location_zones/`](https://nginx.org/en/docs/http/ngx_http_api_module.html#http_location_zones_) data were added | +|[{{}}OpenAPI v2](../../yaml/v4/nginx_api.yaml) for API version 4 | [NGINX Plus Release 18](https://docs.nginx.com/nginx/releases/#nginxplusrelease-18-r18) | The `path` and `method` fields of [nginx error 
object](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_error) were removed. These fields continue to exist in earlier api versions, but show an empty value | +|[{{}}OpenAPI v2](../../yaml/v3/nginx_api.yaml) for API version 3 | NGINX Plus Releases [15](https://docs.nginx.com/nginx/releases/#nginxplusrelease-15-r15), [16](https://docs.nginx.com/nginx/releases/#nginxplusrelease-16-r16), [17](https://docs.nginx.com/nginx/releases/#nginxplusrelease-17-r17) | The [`/stream/zone_sync/`](https://nginx.org/en/docs/http/ngx_http_api_module.html#stream_zone_sync_) data were added | +|[{{}}OpenAPI v2](../../yaml/v2/nginx_api.yaml) for API version 2 | [NGINX Plus Release 14](https://docs.nginx.com/nginx/releases/#nginxplusrelease-14-r14) | The [`drain`](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_upstream_conf_server) parameter was added | +|[{{}}OpenAPI v2](../../yaml/v1/nginx_api.yaml) for API version 1 | [NGINX Plus Release 13](https://docs.nginx.com/nginx/releases/#nginx-plus-release-13-r13) | The [`/stream/keyvals/`](https://nginx.org/en/docs/http/ngx_http_api_module.html#stream_keyvals_) data were added | + +{{}} + +3. Configure NGINX Plus to work with the Swagger UI. Create a [`location`](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), for example, */swagger-ui*: + + ```nginx + location /swagger-ui { + # ... + } + ``` + +2. Specify the path to the Swagger UI files and the YAML spec with the [`root`](https://nginx.org/en/docs/http/ngx_http_core_module.html#root) directive, for example, to `usr/share/nginx/html`: + + ```nginx + location /swagger-ui { + root /usr/share/nginx/html; + # ... + } + ``` + + For NGINX Plus Release 25 and earlier, the Swagger UI is located in the root directory specified by the [`root`](https://nginx.org/en/docs/http/ngx_http_core_module.html#root) directive, for example, */usr/share/nginx/html*. + + +3. 
Restrict access to this location only from a local network with [`allow`](https://nginx.org/en/docs/http/ngx_http_access_module.html#allow) and [`deny`](https://nginx.org/en/docs/http/ngx_http_access_module.html#deny) directives:
+
+    ```nginx
+    location /swagger-ui {
+        root /usr/share/nginx/html;
+        allow 192.168.1.0/24;
+        deny all;
+    }
+    ```
+
+4. It is also recommended enabling Content Security Policy headers that define that all resources are loaded from the same origin as Swagger UI with the [`add_header`](https://nginx.org/en/docs/http/ngx_http_headers_module.html#add_header) directive:
+
+    ```nginx
+    location /swagger-ui {
+        add_header Content-Security-Policy "default-src 'self'";
+        root /usr/share/nginx/html;
+        allow 192.168.1.0/24;
+        deny all;
+    }
+    ```
+
+
+### Disabling the Swagger UI
+
+In NGINX Plus Release 25 and earlier, the Swagger UI is a part of NGINX Plus package and is installed by default. For [security reasons](https://support.f5.com/csp/article/K73710094), you may want to block access to the Swagger UI. One of the ways to do it is to [return](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#return) the `404` status code in response to the URL that matches the `/swagger-ui` location:
+
+```nginx
+location /swagger-ui {
+    return 404;
+}
+```
+
+
+### Using the Swagger UI
+
+To access the Swagger UI page:
+
+- In the address bar of your browser, type-in the address of Swagger UI, in our example the address is **http://192.168.1.23/swagger-ui/**:
+
+![Swagger UI](/nginx/images/swagger-ui.png)
+
+- If you have configured the HTTPS protocol for the Swagger UI page, you will need to choose the "HTTPS" scheme in the "Schemes" menu.
+
+- Click on the operation you want to fulfil.
+
+- Click **Try it out**.
+
+- Fill in the obligatory fields, if required. Generally, the required field is the name of the shared memory zone.
+
+- As an option you can display only particular fields. In the "Fields" line specify the fields you want to be displayed separated by commas.
If no fields are specified, then all fields are displayed. + +- Click **Execute**. The result and the corresponding HTTP error code will be displayed below the **Execute** command. + + +### API and Swagger UI Live Examples + +NGINX provides live examples of JSON data and Swagger UI on a demo website. + +Live example of JSON data is available at: + +You can send an API command with curl or with a browser: + +```shell +curl -s 'https://demo.nginx.com/api/9/' +curl -s 'https://demo.nginx.com/api/9/nginx?fields=version,build' +curl -s 'https://demo.nginx.com/api/9/http/caches/http_cache' +curl -s 'https://demo.nginx.com/api/9/http/upstreams/' +curl -s 'https://demo.nginx.com/api/9/http/upstreams/demo-backend' +curl -s 'https://demo.nginx.com/api/9/http/upstreams/demo-backend/servers/0' +``` + +The Swagger UI demo page is available at: + +[![Swagger UI](/nginx/images/swagger-ui.png)](https://demo.nginx.com/swagger-ui) + +Live examples operate in the read-only mode, resetting the statistics via the `DELETE` method and creating/modifying upstream servers with the `POST`/`PATCH` methods are not available. Also note that as the demo API is served over the HTTP protocol, it is required to choose the “HTTP” scheme in the “Schemes” menu on the [Swagger UI demo page](https://demo.nginx.com/swagger-ui/). diff --git a/content/nginx/admin-guide/monitoring/logging.md b/content/nginx/admin-guide/monitoring/logging.md new file mode 100644 index 000000000..c32dced2e --- /dev/null +++ b/content/nginx/admin-guide/monitoring/logging.md @@ -0,0 +1,210 @@ +--- +description: Capture detailed information about errors and request processing in log + files, either locally or via syslog. +docs: DOCS-426 +doctypes: +- task +title: Configuring Logging +toc: true +weight: 200 +--- + +This article describes how to configure logging of errors and processed requests in NGINX Open Source and NGINX Plus. 
+
+
+## Setting Up the Error Log
+
+NGINX writes information about encountered issues of different severity levels to the error log. The [error_log](https://nginx.org/en/docs/ngx_core_module.html#error_log) directive sets up logging to a particular file, `stderr`, or `syslog` and specifies the minimal severity level of messages to log. By default, the error log is located at **logs/error.log** (the absolute path depends on the operating system and installation), and messages from all severity levels above the one specified are logged.
+
+The configuration below changes the minimal severity level of error messages to log from `error` to `warn`:
+
+```nginx
+error_log logs/error.log warn;
+```
+
+In this case, messages of `warn`, `error`, `crit`, `alert`, and `emerg` levels are logged.
+
+The default setting of the error log works globally. To override it, place the [error_log](https://nginx.org/en/docs/ngx_core_module.html#error_log) directive in the `main` (top-level) configuration context. Settings in the `main` context are always inherited by other configuration levels (`http`, `server`, `location`). The `error_log` directive can be also specified at the [http](https://nginx.org/en/docs/http/ngx_http_core_module.html#http), [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream), `server` and [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) levels and overrides the setting inherited from the higher levels. In case of an error, the message is written to only one error log, the one closest to the level where the error has occurred. However, if several `error_log` directives are specified on the same level, the messages are written to all specified logs.
+
+> **Note:** The ability to specify multiple `error_log` directives on the same configuration level was added in NGINX Open Source version [1.5.2](https://nginx.org/en/CHANGES).
+ + + +## Setting Up the Access Log + +NGINX writes information about client requests in the access log right after the request is processed. By default, the access log is located at **logs/access.log**, and the information is written to the log in the predefined **combined** format. To override the default setting, use the [log_format](https://nginx.org/en/docs/http/ngx_http_log_module.html#log_format) directive to change the format of logged messages, as well as the [access_log](https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log) directive to specify the location of the log and its format. The log format is defined using variables. + +The following examples define the log format that extends the predefined **combined** format with the value indicating the ratio of gzip compression of the response. The format is then applied to a virtual server that enables compression. + +```nginx +http { + log_format compression '$remote_addr - $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent" "$gzip_ratio"'; + + server { + gzip on; + access_log /spool/logs/nginx-access.log compression; + ... + } +} +``` + +Another example of the log format enables tracking different time values between NGINX and an upstream server that may help to diagnose a problem if your website experience slowdowns. 
You can use the following variables to log the indicated time values: + +- [`$upstream_connect_time`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#var_upstream_connect_time) – The time spent on establishing a connection with an upstream server +- [`$upstream_header_time`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#var_upstream_header_time) – The time between establishing a connection and receiving the first byte of the response header from the upstream server +- [`$upstream_response_time`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#var_upstream_response_time) – The time between establishing a connection and receiving the last byte of the response body from the upstream server +- [`$request_time`](https://nginx.org/en/docs/http/ngx_http_log_module.html#var_request_time) – The total time spent processing a request + +All time values are measured in seconds with millisecond resolution. + +```nginx +http { + log_format upstream_time '$remote_addr - $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent"' + 'rt=$request_time uct="$upstream_connect_time" uht="$upstream_header_time" urt="$upstream_response_time"'; + + server { + access_log /spool/logs/nginx-access.log upstream_time; + ... 
+ } +} +``` + +When reading the resulting time values, keep the following in mind: + +- When a request is processed through several servers, the variable contains several values separated by commas +- When there is an internal redirect from one upstream group to another, the values are separated by semicolons +- When a request is unable to reach an upstream server or a full header cannot be received, the variable contains `0` (zero) +- In case of internal error while connecting to an upstream or when a reply is taken from the cache, the variable contains `-` (hyphen) + +Logging can be optimized by enabling the buffer for log messages and the cache of descriptors of frequently used log files whose names contain variables. To enable buffering use the `buffer` parameter of the [access_log](https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log) directive to specify the size of the buffer. The buffered messages are then written to the log file when the next log message does not fit into the buffer as well as in some other [cases](https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log). + +To enable caching of log file descriptors, use the [open_log_file_cache](https://nginx.org/en/docs/http/ngx_http_log_module.html#open_log_file_cache) directive. + +Similar to the `error_log` directive, the [access_log](https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log) directive defined on a particular configuration level overrides the settings from the previous levels. When processing of a request is completed, the message is written to the log that is configured on the current level, or inherited from the previous levels. If one level defines multiple access logs, the message is written to all of them. + + + +## Enabling Conditional Logging + +Conditional logging allows excluding trivial or unimportant log entries from the access log. 
In NGINX, conditional logging is enabled by the `if` parameter to the [access_log](https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log) directive. + +This example excludes requests with HTTP status codes `2xx` (Success) and `3xx` (Redirection): + +```nginx +map $status $loggable { + ~^[23] 0; + default 1; +} + +access_log /path/to/access.log combined if=$loggable; +``` + + + +## Usecase: Sampling TLS Parameters + +Many clients use TLS versions older than TLS 1.3. Though many ciphers are declared insecure, older implementations still use them; ECC certificates offer greater performance than RSA, but not all clients can accept ECC. Many TLS attacks rely on a “man in the middle” who intercepts the cipher negotiation handshake and forces the client and server to select a less secure cipher. Therefore, it’s important to configure F5 NGINX Plus to not support weak or legacy ciphers, but doing so may exclude legacy clients. + +You can evaluate the SSL data obtained from the client and determine what proportion of clients get excluded if support for older SSL protocols and ciphers is removed. + +The following configuration example logs the SSL protocol, cipher, and `User-Agent` header of any connected TLS client, assuming that each client selects the most recent protocol and most secure ciphers it supports. + +In this example, each client is identified by its unique combination of IP address and User-Agent. + +1. 
Define the custom log format `sslparams` that includes the version of the SSL protocol ([`$ssl_protocol`](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#var_ssl_protocol)), ciphers used in the connection ([`$ssl_cipher`](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#var_ssl_cipher)), the client IP address ([`$remote_addr`](http://nginx.org/ru/docs/http/ngx_http_core_module.html#var_remote_addr)), and the value of standard `User Agent` HTTP request field (`$http_user_agent`): + + ```nginx + log_format sslparams '$ssl_protocol $ssl_cipher ' + '$remote_addr "$http_user_agent"'; + ``` + +2. Define a key-value storage that will keep the IP address of the client and its User Agent, for example, `clients`: + + ```nginx + keyval_zone zone=clients:80m timeout=3600s; + ``` + +3. Create a variable, for example, `$seen` for each unique combination of `$remote_addr` and `User-Agent` header: + + ```nginx + keyval $remote_addr:$http_user_agent $seen zone=clients; + + server { + listen 443 ssl; + + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers HIGH:!aNULL:!MD5; + + if ($seen = "") { + set $seen 1; + set $logme 1; + } + access_log /tmp/sslparams.log sslparams if=$logme; + + # ... + } + ``` + +4. 
View the log file generated with this configuration: + + ```none + TLSv1.2 AES128-SHA 1.1.1.1 "Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0" + TLSv1.2 ECDHE-RSA-AES128-GCM-SHA256 2.2.2.2 "Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1" + TLSv1.2 ECDHE-RSA-AES128-GCM-SHA256 3.3.3.3 "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:58.0) Gecko/20100101 Firefox/58.0" + TLSv1.2 ECDHE-RSA-AES128-GCM-SHA256 4.4.4.4 "Mozilla/5.0 (Android 4.4.2; Tablet; rv:65.0) Gecko/65.0 Firefox/65.0" + TLSv1 AES128-SHA 5.5.5.5 "Mozilla/5.0 (Android 4.4.2; Tablet; rv:65.0) Gecko/65.0 Firefox/65.0" + TLSv1.2 ECDHE-RSA-CHACHA20-POLY1305 6.6.6.6 "Mozilla/5.0 (Linux; U; Android 5.0.2; en-US; XT1068 Build/LXB22.46-28) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.108 UCBrowser/12.10.2.1164 Mobile Safari/537.36" + ``` + +5. Process the log file to determine the spread of data: + + ```shell + cat /tmp/sslparams.log | cut -d ' ' -f 2,2 | sort | uniq -c | sort -rn | perl -ane 'printf "%30s %s\n", $F[1], "="x$F[0];' + ``` + + In this output, low‑volume, less secure ciphers are identified: + + ```shell + ECDHE-RSA-AES128-GCM-SHA256 ========================= + ECDHE-RSA-AES256-GCM-SHA384 ======== + AES128-SHA ==== + ECDHE-RSA-CHACHA20-POLY1305 == + ECDHE-RSA-AES256-SHA384 == + ``` + + Then you can check the logs to determine which clients are using these ciphers and then make a decision about removing these ciphers from the NGINX Plus configuration. + + For more information about sampling requests with NGINX conditional logging see the [blog post](https://www.nginx.com/blog/sampling-requests-with-nginx-conditional-logging/#var_request_id). + + + +## Logging to Syslog + +The `syslog` utility is a standard for computer message logging and allows collecting log messages from different devices on a single syslog server. 
In NGINX, logging to syslog is configured with the `syslog:` prefix in [error_log](https://nginx.org/en/docs/ngx_core_module.html#error_log) and [access_log](https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log) directives. + +Syslog messages can be sent to a `server=` which can be a domain name, an IP address, or a UNIX-domain socket path. A domain name or IP address can be specified with a port to override the default port, `514`. A UNIX-domain socket path can be specified after the `unix:` prefix: + +```nginx +error_log syslog:server=unix:/var/log/nginx.sock debug; +access_log syslog:server=[2001:db8::1]:1234,facility=local7,tag=nginx,severity=info; +``` + +In the example, NGINX error log messages are written to a UNIX domain socket at the `debug` logging level, and the access log is written to a syslog server with an IPv6 address and port `1234`. + +The `facility=` parameter specifies the type of program that is logging the message. The default value is `local7`. Other possible values are: `auth`, `authpriv`, `daemon`, `cron`, `ftp`, `lpr`, `kern`, `mail`, `news`, `syslog`, `user`, `uucp`, `local0 ... local7`. + +The `tag=` parameter applies a custom tag to syslog messages (`nginx` in our example). + +The `severity=` parameter sets the severity level of syslog messages for access log. Possible values in order of increasing severity are: `debug`, `info`, `notice`, `warn`, `error` (default), `crit`, `alert`, and `emerg`. Messages are logged at the specified level and all more severe levels. In our example, the severity level `error` also enables `crit`, `alert`, and `emerg` levels to be logged. + + + +## Live Activity Monitoring + +NGINX Plus provides a real-time live activity monitoring interface that shows key load and performance metrics of your [HTTP]({{< relref "../load-balancer/http-load-balancer.md" >}}) and [TCP]({{< relref "../load-balancer/tcp-udp-load-balancer.md" >}}) upstream servers. 
See the [Live Activity Monitoring]({{< relref "live-activity-monitoring.md" >}}) article for more information. + +To learn more about NGINX Plus, please visit the [Products](https://www.nginx.com/products/) page. diff --git a/content/nginx/admin-guide/monitoring/new-relic-plugin.md b/content/nginx/admin-guide/monitoring/new-relic-plugin.md new file mode 100644 index 000000000..062acefbd --- /dev/null +++ b/content/nginx/admin-guide/monitoring/new-relic-plugin.md @@ -0,0 +1,109 @@ +--- +description: "Find what's new in version 2 of the NGINX and F5 NGINX Plus plug\u2011\ + in for New Relic \u2013 more convenience, more metrics, richer visualization." +docs: DOCS-427 +doctypes: +- task +title: Monitoring NGINX and NGINX Plus with the New Relic Plug-In +toc: true +weight: 500 +--- + +In March, 2013 we released the first version of the [“nginx web server” plug‑in](http://newrelic.com/plugins/nginx-inc/13) for New Relic monitoring of the NGINX Open Source software and F5 NGINX Plus. Since then, we’ve received lots of interest from users – we greatly appreciate it! The plug‑in continues to be one of the most popular New Relic plug‑ins, in spite of the few things that (we believe!) could be improved. If you don’t already have a New Relic account, [sign up](http://newrelic.com/). + +We selected Ruby as the base language for the original plug‑in, because the New Relic platform itself is written in Ruby, the API was not yet published, and many things were changing quickly. + +Later, after the launch of the New Relic platform, we continued to work on adding a number of improvements based on users’ feedback and our own vision of how to improve the overall usability of the NGINX monitoring plug‑in, and realized the need for further changes. + +## What’s New? + +Today, we are pleased to announce a major update of the NGINX plug‑in for New Relic, **version 2.0**, which includes the following changes: + +- The plug‑in is rewritten in Python. You no longer need to install Ruby. 
+- The plug‑in is finally packaged. There are prebuilt packages for RHEL/CentOS‑based and Debian/Ubuntu‑based systems. An included init script enables you to easily set up autostart of the plug‑in. +- There are two new sections in the live activity monitoring dashboard, for NGINX Plus customers: + + - **Servers** – Additional summary counters for virtual servers whose `server` configuration block includes the [status_zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone) directive + - **Cache** – Cumulative stats for all configured caches +- Verbose logging is enabled by default. + +## Installation + +Download the [plug‑in and installation instructions](https://www.nginx.com/nr-plugin/). + +## Configuring the Plug‑In + +The configuration file for the NGINX plug‑in is /etc/nginx-nr-agent/nginx-nr-agent.ini. The minimal configuration includes: + +- Your New Relic license key in the `newrelic_license_key` statement in the `global` section. + +- At least one `source` section. The name of the section is used in log entries only, and can contain almost any character string you want. Two parameters are required: + + - `name` – NGINX instance name in the New Relic UI. + - `url` – Full URL to the corresponding instance. The plug‑in accepts source data in the format generated when the [stub_status](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html#stub_status) directive is included in the configuration of NGINX or NGINX Plus. It also supports the JSON‑formatted output generated when the [api](https://nginx.org/en/docs/http/ngx_http_api_module.html#api) directive is included in an NGINX Plus configuration. + +You can include the optional `http_user` and `http_pass` statements to set HTTP basic authentication credentials in cases where the corresponding location is protected by the NGINX [auth_basic](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic) directive. + +The default log file is /var/log/nginx-nr-agent.log. 
+ +## Running the Plug‑In + +You can manage the plug‑in with an init script. Run the `service` `nginx-nr-agent` command with the `start`, `stop`, or `status` argument to start, stop, or display the status of the plug‑in, respectively. + +When started from an init script, the plug‑in daemon runs as the `nobody` user. + +You can start the plug‑in directly by running `/usr/bin/nginx-nr-agent.py`, with the following optional parameters: + +- `-c`, `--config` – Path to configuration file +- `-p`, `--pidfile`> – Path to PID file +- `-f`, `--foreground` – Do not detach from terminal (useful for debugging) + +If everything is working as expected, the plug‑in appears as a tab in the New Relic UI: + +[![image of NGINX instance in New Relic instances list](/nginx/images/0s-instances.png)](/nginx/images/0s-instances.png) + +## Dashboard Examples + +### Overview + +The **Overview** tab displays the most basic metrics for the whole instance: number of active and idle connections, and request rate. + +[![image of Overview tab in New Relic UI](/nginx/images/1s-overview.png)](/nginx/images/1s-overview.png) + +### Connections + +The **Connections** tab displays various metrics about client connections. + +[![image of Connections tab in New Relic UI](/nginx/images/2s-connections.png)](/nginx/images/2s-connections.png) + +### Requests + +The **Requests** tab displays the number of requests currently being processed and the overall request rate. + +[![image of Requests tab in New Relic UI](/nginx/images/3s-requests.png)](/nginx/images/3s-requests.png) + +### Upstreams (NGINX Plus only) + +The **Upstreams** tab displays detailed statistics about all configured upstream groups. + +[![image of Upstreams tab in New Relic UI](/nginx/images/4s-upstreams.png)](/nginx/images/4s-upstreams.png) + +### Servers (NGINX Plus only) + +The **Servers** tab displays detailed statistics about all configured virtual server zones. 
+ +[![image of Servers tab in New Relic UI](/nginx/images/5s-servers.png)](/nginx/images/5s-servers.png) + +### Cache (NGINX Plus only) + +The **Cache** tab displays summary statistics for all configured caches. + +[![image of Caches tab in New Relic UI](/nginx/images/6s-cache.png)](/nginx/images/6s-cache.png) + +## What’s Next? + +We plan to extend the plug‑in’s functionality with additional metrics for both NGINX and NGINX Plus, and your feedback is very important to us. Ideas, thoughts, questions? Let us know by commenting on this post. + +Thanks for using NGINX and NGINX Plus! + +To try NGINX Plus, start your [free 30-day trial](https://www.nginx.com/free-trial-request/) today or [contact us](https://www.nginx.com/contact-sales/) for a demo. diff --git a/content/nginx/admin-guide/security-controls/_index.md b/content/nginx/admin-guide/security-controls/_index.md new file mode 100644 index 000000000..2cc87a12f --- /dev/null +++ b/content/nginx/admin-guide/security-controls/_index.md @@ -0,0 +1,9 @@ +--- +description: Documentation explaining how to increase the security of an F5 NGINX or + NGINX Plus deployment, including SSL termination, authentication, and access control. +menu: + docs: + parent: NGINX Plus +title: Security Controls +weight: 600 +--- diff --git a/content/nginx/admin-guide/security-controls/configuring-http-basic-authentication.md b/content/nginx/admin-guide/security-controls/configuring-http-basic-authentication.md new file mode 100644 index 000000000..388e527f0 --- /dev/null +++ b/content/nginx/admin-guide/security-controls/configuring-http-basic-authentication.md @@ -0,0 +1,160 @@ +--- +description: Control access using HTTP Basic authentication, and optionally in combination + with IP address-based access control. 
+docs: DOCS-428 +doctypes: +- task +title: Restricting Access with HTTP Basic Authentication +toc: true +weight: 300 +--- + + +## Introduction + +You can restrict access to your website or some parts of it by implementing a username/password authentication. Usernames and passwords are taken from a file created and populated by a password file creation tool, for example, `apache2-utils`. + +HTTP Basic authentication can also be combined with other access restriction methods, for example restricting access by [IP address]({{< relref "denylisting-ip-addresses.md" >}}) or [geographical location]({{< relref "controlling-access-by-geoip.md" >}}). + + +## Prerequisites + +- F5 NGINX Plus or NGINX Open Source +- Password file creation utility such as `apache2-utils` (Debian, Ubuntu) or `httpd-tools` (RHEL/CentOS/Oracle Linux). + + +## Creating a Password File + +To create username-password pairs, use a password file creation utility, for example, `apache2-utils` or `httpd-tools` + +1. Verify that `apache2-utils` (Debian, Ubuntu) or `httpd-tools` (RHEL/CentOS/Oracle Linux) is installed. +2. Create a password file and a first user. Run the `htpasswd` utility with the `-c` flag (to create a new file), the file pathname as the first argument, and the username as the second argument: + + ```shell + sudo htpasswd -c /etc/apache2/.htpasswd user1 + ``` + + Press Enter and type the password for **user1** at the prompts. + +3. Create additional user-password pairs. Omit the `-c` flag because the file already exists: + + ```shell + sudo htpasswd /etc/apache2/.htpasswd user2 + ``` + +4. You can confirm that the file contains paired usernames and hashed passwords: + + ```shell + $ cat /etc/apache2/.htpasswd + user1:$apr1$/woC1jnP$KAh0SsVn5qeSMjTtn0E9Q0 + user2:$apr1$QdR8fNLT$vbCEEzDj7LyqCMyNpSoBh/ + user3:$apr1$Mr5A0e.U$0j39Hp5FfxRkneklXaMrr/ + ``` + + +## Configuring NGINX and NGINX Plus for HTTP Basic Authentication + +1. 
Inside a location that you are going to protect, specify the [auth_basic](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic) directive and give a name to the password-protected area. The name of the area will be shown in the username/password dialog window when asking for credentials: + + ```nginx + location /api { + auth_basic "Administrator’s Area"; + #... + } + ``` + +2. Specify the [auth_basic_user_file](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic_user_file) directive with a path to the _.htpasswd_ file that contain user/password pairs: + + ```nginx + location /api { + auth_basic "Administrator’s Area"; + auth_basic_user_file /etc/apache2/.htpasswd; + } + ``` + +Alternatively, you you can limit access to the whole website with basic authentication but still make some website areas public. In this case, specify the `off` parameter of the [auth_basic](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic) directive that cancels inheritance from upper configuration levels: + +```nginx +server { + ... + auth_basic "Administrator’s Area"; + auth_basic_user_file conf/htpasswd; + + location /public/ { + auth_basic off; + } +} +``` + + +## Combining Basic Authentication with Access Restriction by IP Address + +HTTP basic authentication can be effectively combined with access restriction by IP address. You can implement at least two scenarios: + +- a user must be both authenticated and have a valid IP address +- a user must be either authenticated, or have a valid IP address + +1. Allow or deny access from particular IP addresses with the [allow](https://nginx.org/en/docs/http/ngx_http_access_module.html#allow) and [deny](https://nginx.org/en/docs/http/ngx_http_access_module.html#deny) directives: + + ```nginx + location /api { + #... 
+ deny 192.168.1.2; + allow 192.168.1.1/24; + allow 127.0.0.1; + deny all; + } + ``` + + Access will be granted only for the `192.168.1.1/24` network excluding the `192.168.1.2` address. Note that the `allow` and `deny` directives will be applied in the order they are defined. + +2. Combine restriction by IP and HTTP authentication with the [satisfy](https://nginx.org/en/docs/http/ngx_http_core_module.html#satisfy) directive. + If you set the directive to to `all`, access is granted if a client satisfies both conditions. If you set the directive to `any`, access is granted if if a client satisfies at least one condition: + + ```nginx + location /api { + #... + satisfy all; + + deny 192.168.1.2; + allow 192.168.1.1/24; + allow 127.0.0.1; + deny all; + + auth_basic "Administrator’s Area"; + auth_basic_user_file conf/htpasswd; + } + ``` + + +## Complete Example + +The example shows how to protect your status area with simple authentication combined with access restriction by IP address: + +```nginx +http { + server { + listen 192.168.1.23:8080; + root /usr/share/nginx/html; + + location /api { + api; + satisfy all; + + deny 192.168.1.2; + allow 192.168.1.1/24; + allow 127.0.0.1; + deny all; + + auth_basic "Administrator’s Area"; + auth_basic_user_file /etc/apache2/.htpasswd; + } + } +} +``` + +When you access your status page, you are prompted to log in: + +[![auth_required](https://cdn.wp.nginx.com/wp-content/uploads/2016/10/auth_required.png)](https://cdn.wp.nginx.com/wp-content/uploads/2016/10/auth_required.png) + +If the provided name and password do not match the password file, you get the `401 (Authorization Required)` error. 
diff --git a/content/nginx/admin-guide/security-controls/configuring-jwt-authentication.md b/content/nginx/admin-guide/security-controls/configuring-jwt-authentication.md new file mode 100644 index 000000000..9e0443062 --- /dev/null +++ b/content/nginx/admin-guide/security-controls/configuring-jwt-authentication.md @@ -0,0 +1,389 @@ +--- +description: This article explains how to control authentication of your web resources + using JWT authentication. +docs: DOCS-429 +doctypes: +- task +title: Setting up JWT Authentication +toc: true +weight: 500 +--- + + +## Introduction + +With F5 NGINX Plus it is possible to control access to your resources using JWT authentication. JWT is data format for user information in the OpenID Connect standard, which is the standard identity layer on top of the OAuth 2.0 protocol. Deployers of APIs and microservices are also turning to the JWT standard for its simplicity and flexibility. With JWT authentication, a client provides a JSON Web Token, and the token will be validated against a local key file or a remote service. + + +## Prerequisites + +- NGINX Plus Release 10 (R10) for native [JWT support](https://www.nginx.com/blog/nginx-plus-r10-released/#r10-jwt) +- NGINX Plus Release 14 (R14) for access to [nested JWT claims and longer signing keys](https://www.nginx.com/blog/nginx-plus-r14-released/#jwt) +- NGINX Plus Release 17 (R17) for [getting JSON Web keys from a remote location](https://www.nginx.com/blog/nginx-plus-r17-released/#r17-openid) +- NGINX Plus Release 24 (R24) for support of encrypted tokens (JWE) +- NGINX Plus Release 25 (R25) for support of Nested JWT, multiple sources of JSON Web keys, condition-based JWT authentication +- NGINX Plus Release 26 (R26) for support of [JWT key caching](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_cache) +- An identity provider (IdP) or service that creates JWT. 
For manual JWT generation, see "Issuing a JWT to API Clients" section of the [Authenticating API Clients with JWT and NGINX Plus](https://www.nginx.com/blog/authenticating-api-clients-jwt-nginx-plus/) blog post. + + + +NGINX Plus supports the following types of JWT: + +- JSON Web Signature (JWS) - JWT content is digitally signed. The following algorithms can be used for signing: + - HS256, HS384, HS512 + - RS256, RS384, RS512 + - ES256, ES384, ES512 + - EdDSA (Ed25519 and Ed448 signatures) + +- JSON Web Encryption (JWE) - the contents of JWT is encrypted. The following content encryption algorithms (the "enc" field of JWE header) are supported: + - A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 + - A128GCM, A192GCM, A256GCM + + The following key management algorithms (the "alg" field of JWE header) are supported: + - A128KW, A192KW, A256KW + - A128GCMKW, A192GCMKW, A256GCMKW + - dir - direct use of a shared symmetric key as the content encryption key + - RSA-OAEP, RSA-OAEP-256, RSA-OAEP-384, RSA-OAEP-512 + +- Nested JWT - support for JWS enclosed into JWE + + +## Configuring NGINX Plus to Authenticate API + +Let's assume that NGINX Plus serves as a gateway (`proxy_pass http://api_server`) to a number of API servers (the [`upstream {}`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) block), and requests passed to the API servers should be authenticated: + +```nginx +upstream api_server { + server 10.0.0.1; + server 10.0.0.2; +} + +server { + listen 80; + + location /products/ { + proxy_pass http://api_server; + #... + } +} +``` + +To implement JWT for authentication: + +1. First, it is necessary to create a JWT that will be issued to a client. You can use your identity provider (IdP) or your own service to create JWTs. For testing purposes, you can create your own JWT, see [Authenticating API Clients with JWT and NGINX Plus](https://www.nginx.com/blog/authenticating-api-clients-jwt-nginx-plus/) blog post for details. + +2. 
Configure NGINX Plus to accept JWT: specify the [`auth_jwt`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt) directive that enables JWT authentication and also defines the authentication area (or "realm", "API" in the example): + + ```nginx + server { + listen 80; + + location /products/ { + proxy_pass http://api_server; + auth_jwt "API"; + #... + } + } + ``` + + NGINX Plus can also obtain the JWT from a query string parameter. To configure this, include the `token=` parameter to the [`auth_jwt`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt) directive: + + ```nginx + #... + auth_jwt "API" token=$arg_apijwt; + #... + ``` + +3. Specify the type of JWT - `signed` (JWS), `encrypted` (JWE) or `nested` (Nested JWT) - with the [`auth_jwt_type`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_type) directive. The default value of the directive is `signed`, so for JWS, the directive can be omitted. + + ```nginx + server { + listen 80; + + location /products/ { + proxy_pass http://api_server; + auth_jwt "API"; + auth_jwt_type encrypted; + #... + } + } + ``` + +4. Specify the path to the [JSON Web Key file](#jwk_create) that will be used to verify JWT signature or decrypt JWT content, depending on what you are using. This can be done with the [`auth_jwt_key_file`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_file) and/or [`auth_jwt_key_request`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_request) directives. Specifying both directives at the same time will allow you to specify more than one source for keys. If none of the directives are specified, JWS signature verification will be skipped. 
+ + In this scenario, the keys will be taken from two files: the `key.jwk` file and the `keys.json` file: + + ```nginx + server { + listen 80; + + location /products/ { + proxy_pass http://api_server; + auth_jwt "API"; + auth_jwt_type encrypted; + auth_jwt_key_file conf/key.jwk; + auth_jwt_key_file conf/keys.json; + } + } + ``` + + In this scenario, there are also two sources for the keys, but the private keys will be taken from the local file `private_jwe_keys.jwk`, while the public keys will be taken from the external identity provider service `https://idp.example.com` in a [subrequest](#auth_jwt_key_request): + + ```nginx + server { + listen 80; + + location /products/ { + proxy_pass http://api_server; + auth_jwt "API"; + auth_jwt_type encrypted; + auth_jwt_key_file private_jwe_keys.jwk; + auth_jwt_key_request /public_jws_keys; + } + + location /public_jws_keys { + proxy_pass "https_//idp.example.com/keys"; + } + } + ``` + + It is recommended to enable JWT key caching to get the optimal performance from the JWT module. For example, you can use the [`auth_jwt_key_cache`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_cache) directive for the above configuration, and enable the JWT key caching for one hour. Note that if the [`auth_jwt_key_request`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_request) or [`auth_jwt_key_file`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_file) are configured dynamically with variables, [`auth_jwt_key_cache`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_cache) cannot be used. 
+ + ```nginx + server { + listen 80; + + location /products/ { + proxy_pass http://api_server; + auth_jwt "API"; + auth_jwt_type encrypted; + auth_jwt_key_file private_jwe_keys.jwk; + auth_jwt_key_request /public_jws_keys; + auth_jwt_key_cache 1h; + } + + location /public_jws_keys { + proxy_pass "https_//idp.example.com/keys"; + } + } + ``` + + +## How NGINX Plus Validates a JWT + +A JWT is considered to be valid when the following conditions are met: + +- The signature can be verified (for JWS) or payload can be decrypted (for JWE) with the key found in the [`auth_jwt_key_file`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_file) or [`auth_jwt_key_request`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_request) (matching on the `kid` ("key ID"), if present, and `alg` ("algorithm") header fields). +- The JWT is presented inside the validity period, when defined by one or both of the `nbf` ("not before") and `exp` ("expires") claims. + + + +## Creating a JSON Web Key File + +In order to validate the signature with a key or to decrypt data, a JSON Web Key (`key.jwk`) should be created. The file format is defined by [JSON Web Key specification](https://tools.ietf.org/html/rfc7517): + +```json +{"keys": + [{ + "k":"ZmFudGFzdGljand0", + "kty":"oct", + "kid":"0001" + }] +} +``` + +where: + +- the `k` field is the generated symmetric key (base64url-encoded) basing on a `secret` (`fantasticjwt` in the example). The secret can be generated with the following command: + +```shell +echo -n fantasticjwt | base64 | tr '+/' '-_' | tr -d '=' +ZmFudGFzdGljand0 +``` + +- the `kty` field defines the key type as a symmetric key (octet sequence) +- the `kid` (Key ID) field defines a serial number for this JSON Web Key + + + +## Getting JWKs from Subrequest + +NGINX Plus can be configured to fetch JSON Web Keys from the remote location - usually an identity provider, especially when using OpenID Connect. 
The IdP URI where the subrequest will be sent to is configured with the [`auth_jwt_key_request`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_request) directive: + +```nginx +http { + #... + + server { + listen 80; + #... + + location / { + auth_jwt "closed site"; + auth_jwt_key_request /_jwks_uri; # Keys will be fetched by subrequest + + proxy_pass http://my_backend; + } + } +} +``` + +The URI may refer to an internal location (`_jwks_uri`) so that the JSON Web Key Set can be cached ([`proxy_cache`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache) and [`proxy_cache_path`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) directives) to avoid validation overhead. Turning on caching is recommended for high-load API gateways even if [JWT key caching](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_cache) is used as it will help to avoid overwhelming a key server with key requests when a JWT key cache expires. + +```nginx +http { + proxy_cache_path /var/cache/nginx/jwk levels=1 keys_zone=jwk:1m max_size=10m; + #... + + server { + listen 80; + #... 
+ + location = /_jwks_uri { + internal; + proxy_method GET; + proxy_cache jwk; # Cache responses + proxy_cache_valid 200 12h; + proxy_pass https://idp.example.com/oauth2/keys; # Obtain keys from here + } + } +} +``` + +The full example of getting JWKs from a subrequest: + +```nginx +# +proxy_cache_path /var/cache/nginx/jwk levels=1 keys_zone=jwk:1m max_size=10m; + +server { + listen 80; # Use SSL/TLS in production + + location / { + auth_jwt "closed site"; + auth_jwt_key_cache 1h; + auth_jwt_key_request /_jwks_uri; # Keys will be fetched by subrequest + + proxy_pass http://my_backend; + } + + location = /_jwks_uri { + internal; + proxy_method GET; + proxy_cache jwk; # Cache responses + proxy_cache_valid 200 12h; + proxy_pass https://idp.example.com/oauth2/keys; # Obtain keys from here + } +} +``` + + +## Arbitrary JWT Claims Validation + +During JWT verification, NGINX Plus automatically validates only `nbf` ("not before") and `exp` ("expires") claims. However, in some cases you need to set more conditions for a successful JWT validation, in particular when dealing with application-specific or protocol level claims. For example, OpenID Connect Core requires validation of `iss` ("issuer"), `aud` ("audience"), `sub` ("subject") claims for `ID` token. + +Additional conditions for JWT validation can be set as variables with the [`map`](https://nginx.org/en/docs/http/ngx_http_map_module.html) module and then evaluated with the [`auth_jwt_require`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_require) directive. 
+
+In this scenario, we are verifying that:
+
+- the recipient of the token (audience) is our APIs (map rule 1)
+- the token was issued by a trusted identity provider (map rule 2)
+- scopes in APIs called on behalf of administrators (map rule 3)
+
+The values of three resulting variables are evaluated in the [`auth_jwt_require`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_require) directive, and if the value of each variable is `1`, the JWT will be accepted:
+
+```nginx
+upstream api_server {
+    server 10.0.0.1;
+    server 10.0.0.2;
+}
+
+map $jwt_claim_aud $valid_app_id {   #map rule 1:
+    "~api\d.example.com" 1;          #token issued only for target apps
+}
+
+map $jwt_claim_iss $valid_issuer {   #map rule 2:
+    "https://idp.example.com/sts" 1; #token issued by trusted CA
+}
+
+map $jwt_claim_scope $valid_scope {  #map rule 3:
+    "access_as_admin" 1;             #access as admin only
+}
+
+server {
+    listen 80;
+
+    location /products/ {
+        auth_jwt "API";
+        auth_jwt_key_file conf/api_secret.jwk;
+        auth_jwt_require $valid_app_id $valid_issuer $valid_scope;
+        proxy_pass http://api_server;
+    }
+}
+```
+
+In some cases the [`auth_jwt_require`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_require) directive can be specified multiple times, for example, for the purpose of authentication and then for authorization. In case of an error, the `401` code will be displayed. Assigning the custom error code `403` to another `auth_jwt_require` directive makes it possible to differentiate authentication and authorization use cases and handle corresponding failures appropriately:
+
+```nginx
+    location /products/ {
+        auth_jwt "API";
+        auth_jwt_key_file conf/api_secret.jwk;
+        auth_jwt_require $valid_app_id $valid_issuer $valid_scope;
+        auth_jwt_require $valid_scope error=403;
+        proxy_pass http://api_server;
+    }
+```
+
+
+## Nested JWT Extraction
+
+A Nested JWT is a JWS token enclosed into JWE. 
In a Nested JWT, the sensitive information from JWS is protected with extra encryption of JWE. + +Using Nested JWT may be preferable over JWE because: + +- in case of JWE, the target application/service needs to decrypt the token first, then verify the signature. Decrypt operation on the application side may be time and resource consuming. + +- in case of Nested JWT, as NGINX Plus resides in the same trusted network with the target application, there is no need for token encryption between NGINX Plus and the application. NGINX Plus decrypts the JWE, checks the enclosed JWS, and sends the Bearer Token to the application. This will offload JWE decryption from the application to NGINX Plus. + +- if your application doesn't support JWE, using Nested JWT enables full protection for JWS. + +To enable Nested tokens: + +1. Specify the `nested` type of JWT with the [`auth_jwt_type`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_type) directive. + +```nginx +auth_jwt_type nested; +``` + +2. 
Pass the decrypted payload (the [`$jwt_payload`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#var_jwt_payload) variable) to the application as the Bearer token value in the `Authorization` header: + +```nginx +proxy_set_header Authorization "Bearer $jwt_payload"; +``` + +This example sums up the previous steps into one configuration: + +```nginx +upstream api_server { + server 10.0.0.1; + server 10.0.0.2; +} + +http { + server { + listen 80; + + auth_jwt "API"; + auth_jwt_type nested; + auth_jwt_key_file conf/api_secret.jwk; + + proxy_pass http://api_server; + proxy_set_header Authorization "Bearer $jwt_payload"; + } +} +``` + + +## See Also + +- [Authenticating API Clients with JWT and NGINX Plus](https://www.nginx.com/blog/authenticating-api-clients-jwt-nginx-plus/) diff --git a/content/nginx/admin-guide/security-controls/configuring-subrequest-authentication.md b/content/nginx/admin-guide/security-controls/configuring-subrequest-authentication.md new file mode 100644 index 000000000..cb1c28155 --- /dev/null +++ b/content/nginx/admin-guide/security-controls/configuring-subrequest-authentication.md @@ -0,0 +1,111 @@ +--- +description: Authenticate clients during request processing by making a subrequest + to an external authentication service, such as LDAP or OAuth. +docs: DOCS-430 +doctypes: +- task +title: Authentication Based on Subrequest Result +toc: true +weight: 400 +--- + + +## Introduction + +NGINX and F5 NGINX Plus can authenticate each request to your website with an external server or service. To perform authentication, NGINX makes an HTTP subrequest to an external server where the subrequest is verified. If the subrequest returns a `2xx` response code, the access is allowed, if it returns `401` or `403`, the access is denied. Such type of authentication allows implementing various authentication schemes, such as multifactor authentication, or allows implementing LDAP or OAuth authentication. 
+ +## Prerequisites + +- NGINX Plus or NGINX Open Source +- External authentication server or service + + +## Configuring NGINX and NGINX Plus + +1. Make sure your NGINX Open Source is compiled with the `with-http_auth_request_module` configuration option. Run this command and verify that the output includes `--with-http_auth_request_module`: + + ```none + nginx -V 2>&1 | grep -- 'http_auth_request_module' + ``` + + Skip this step for NGINX Plus as it already includes the auth_request module. + +2. In the location that requires request authentication, specify the [auth_request](https://nginx.org/en/docs/http/ngx_http_auth_request_module.html#auth_request) directive in which specify an internal location where an authorization subrequest will be forwarded to: + + ```nginx + location /private/ { + auth_request /auth; + #... + } + ``` + + Here, for each request to **/private**, a subrequest to the internal **/auth** location will be made. + +3. Specify an internal location and the [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) directive inside this location that will proxy authentication subrequests to an authentication server or service: + + ```nginx + location = /auth { + internal; + proxy_pass http://auth-server; + #... + } + ``` + +4. As the request body is discarded for authentication subrequests, you will need to set the [proxy_pass_request_body](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass_request_body) directive to `off` and also set the `Content-Length` header to a null string: + + ```nginx + location = /auth { + internal; + proxy_pass http://auth-server; + proxy_pass_request_body off; + proxy_set_header Content-Length ""; + #... + } + ``` + +5. 
Pass the full original request URI with arguments with the [proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) directive: + + ```nginx + location = /auth { + internal; + proxy_pass http://auth-server; + proxy_pass_request_body off; + proxy_set_header Content-Length ""; + proxy_set_header X-Original-URI $request_uri; + } + ``` + +6. As an option, you can set a variable value basing on the result of the subrequest with the [auth_request_set](https://nginx.org/en/docs/http/ngx_http_auth_request_module.html#auth_request_set) directive: + + ```nginx + location /private/ { + auth_request /auth; + auth_request_set $auth_status $upstream_status; + } + ``` + + +## Complete Example + +This example sums up the previous steps into one configuration: + +```nginx +http { + #... + server { + #... + location /private/ { + auth_request /auth; + auth_request_set $auth_status $upstream_status; + } + + location = /auth { + internal; + proxy_pass http://auth-server; + proxy_pass_request_body off; + proxy_set_header Content-Length ""; + proxy_set_header X-Original-URI $request_uri; + } + } +} +``` diff --git a/content/nginx/admin-guide/security-controls/controlling-access-by-geoip.md b/content/nginx/admin-guide/security-controls/controlling-access-by-geoip.md new file mode 100644 index 000000000..8f5ebbd58 --- /dev/null +++ b/content/nginx/admin-guide/security-controls/controlling-access-by-geoip.md @@ -0,0 +1,399 @@ +--- +description: Control access or forward traffic to different upstream servers based + on the client's geographical location, using the GeoIP2 dynamic module. +docs: DOCS-431 +doctypes: +- task +title: Restricting Access by Geographical Location +toc: true +weight: 800 +--- + + +## Introduction + +F5 NGINX Plus can differentiate users based on their geographical location. For example, you can have different website content for different countries, or you can restrict content distribution to a particular country or city. 
+ +NGINX Plus uses third-party MaxMind databases to match the IP address of the user and its location. As soon as the geoposition is known, it is then possible to use geoip-based variables in the [map](https://nginx.org/en/docs/http/ngx_http_map_module.html) or the [split_clients](https://nginx.org/en/docs/http/ngx_http_split_clients_module.html) module. + +> **Note** MaxMind GeoLite Legacy databases are currently [discontinued](https://blog.maxmind.com/2018/01/discontinuation-of-the-geolite-legacy-databases), MaxMind GeoIP2 or GeoLite2 databases and NGINX Plus [GeoIP2 module]({{< relref "../dynamic-modules/geoip2.md" >}}) should be used instead. + +Restricting by geographical location works both for HTTP and TCP/UDP protocols. + + + +## Prerequisites + +- NGINX Plus [GeoIP2 dynamic module]({{< relref "../dynamic-modules/geoip2.md" >}}) +- [GeoIP2](https://www.maxmind.com/en/geoip2-databases) or [GeoLite2](https://dev.maxmind.com/geoip/geoip2/geolite2/) databases from MaxMind +- (optional) [mmdblookup](http://maxmind.github.io/libmaxminddb/mmdblookup.html) utility that looks up an IP address in a MaxMind Database file + + + +## Getting the Databases + +The GeoIP2 or GeoLite2 databases can be obtained from the [MaxMind download page](https://www.maxmind.com/en/geoip2-databases). In this example, the GeoLite2 free downloadable databases are used. + +To get and unpack GeoLite2 Country database: + +```shell +wget http://geolite.maxmind.com/download/geoip/database/GeoLite2-Country.mmdb.gz +gunzip GeoLite2-Country.mmdb.gz +``` + +To get and unpack GeoLite2 City database: + +```shell +wget http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.mmdb.gz +gunzip GeoLite2-City.mmdb.gz +``` + + + +## Understanding Database Structure + +To see the available geodata, you can query the GeoLite2-Country and GeoLite2-City databases with the [mmdblookup](http://maxmind.github.io/libmaxminddb/mmdblookup.html) utility. The geodata is represented as the JSON tree. 
+ +Install the [libmaxminddb](http://maxmind.github.io/libmaxminddb/index.html) database utility: + +- For Amazon Linux, CentOS, Oracle Linux, and RHEL: + + ```shell + yum install libmaxminddb-devel + ``` + +- For Debian and Ubuntu: + + ```shell + apt-get install libmaxminddb-dev + ``` + +- For SLES: + + ```shell + zypper install libmaxminddb-devel + ``` + +A query to the databases can be sent in the following format: + +```none +mmdblookup –file [FILE PATH] –ip [IP ADDRESS] [DATA PATH] +``` + +For example, to get all available geodata for the `8.8.8.8` IP address, send the following command: + +```shell +mmdblookup --file /usr/local/etc/geoip2/GeoLite2-Country.mmdb --ip 8.8.8.8 +``` + +The output will be: + +```json +{ + "continent": + { + "code": + "NA" + "geoname_id": + 6255149 + "names": + { + "de": + "Nordamerika" + "en": + "North America" + "es": + "Norteamérica" + "fr": + "Amérique du Nord" + "ja": + "北アメリカ" + "pt-BR": + "América do Norte" + "ru": + "Северная Америка" + "zh-CN": + "北美洲" + } + } + "country": + { + "geoname_id": + 6252001 + "iso_code": + "US" + "names": + { + "de": + "USA" + "en": + "United States" + "es": + "Estados Unidos" + "fr": + "États-Unis" + "ja": + "アメリカ合衆国" + "pt-BR": + "Estados Unidos" + "ru": + "США" + "zh-CN": + "美国" + } + } + "registered_country": + { + "geoname_id": + 6252001 + "iso_code": + "US" + "names": + { + "de": + "USA" + "en": + "United States" + "es": + "Estados Unidos" + "fr": + "États-Unis" + "ja": + "アメリカ合衆国" + "pt-BR": + "Estados Unidos" + "ru": + "США" + "zh-CN": + "美国" + } + } + } +``` + +To get particular geodata, for example, only the ISO code of a particular country, add the `country iso_code` parameters to the end of the command: + +```shell +mmdblookup --file /usr/local/etc/geoip2/GeoLite2-Country.mmdb --ip 8.8.8.8 country iso_code +``` + +These parameters are also used when creating variables in the GeoIP2 module for NGINX. + + + +## Configuring GeoIP2 in NGINX Plus + +1. 
Install the GeoIP2 dynamic module for NGINX Plus: + + For Amazon Linux, CentOS, Oracle Linux, and RHEL: + + ```shell + yum install nginx-plus-module-geoip2 + ``` + + For Debian and Ubuntu: + + ```shell + apt-get install nginx-plus-module-geoip2 + ``` + + For SLES: + + ```shell + zypper install nginx-plus-module-geoip2 + ``` + +2. Enable the GeoIP2 dynamic module in the NGINX Plus configuration file with the [load_module](https://nginx.org/en/docs/ngx_core_module.html#load_module) directive specified in the `main` configuration level: + + ```nginx + load_module modules/ngx_http_geoip2_module.so; + load_module modules/ngx_stream_geoip2_module.so; + + http { + # ... + } + ``` + +3. Add the paths to the country and city databases to the NGINX configuration with the `geoip2 {}` block for [`http {}`](https://nginx.org/en/docs/http/ngx_http_core_module.html#http), [`stream {}`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream), or both: + + ```nginx + http { + #... + geoip2 GeoIP2/GeoLite2-Country.mmdb { + #... + } + + geoip2 GeoIP2/GeoLite2-City.mmdb { + #... + } + } + + stream { + #... + geoip2 GeoIP2/GeoLite2-Country.mmdb { + #... + } + + geoip2 GeoIP2/GeoLite2-City.mmdb { + #... + } + } + ``` + +4. 
Basing on the [GeoIP database structure](#mmdblookup), create custom variables that will keep the data from the GeoIP2 database and then later pass the data to the [map](https://nginx.org/en/docs/http/ngx_http_map_module.html) or [split_clients](https://nginx.org/en/docs/http/ngx_http_split_clients_module.html) directives (can be applied in both the [`http {}`](https://nginx.org/en/docs/http/ngx_http_core_module.html#http) and [`stream {}`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) contexts): + + + ```nginx + geoip2 GeoIP2/GeoLite2-City.mmdb { + $geoip2_data_city_name city names en; + $geoip2_data_postal_code postal code; + $geoip2_data_latitude location latitude; + $geoip2_data_longitude location longitude; + $geoip2_data_state_name subdivisions 0 names en; + $geoip2_data_state_code subdivisions 0 iso_code; + } + + geoip2 GeoIP2/GeoLite2-Country.mmdb { + $geoip2_data_continent_code continent code; + $geoip2_data_country_iso_code country iso_code; + } + + #... + ``` + + + +## Scenario: Choosing the Nearest Server + +Using the geolocation data from the created variables, a client connection can be redirected to the closest server, thus reducing network latency and improving connection speed. + +This can be achieved by using the continent code from the GeoIP2 database in a variable and the [map](https://nginx.org/en/docs/http/ngx_http_map_module.html) module that will create another variable whose value will be the closest server basing on a continent location. Basing on this value, NGINX will pass the request to the corresponding upstream server group. + +1. 
Make sure you have configured the servers or [upstream server groups]({{< relref "../load-balancer/http-load-balancer.md" >}}) for each continent, for example, `eu` for Europe, `na` for North America, `all` for cases when the IP address cannot be matched against the GeoIP database: + + ```nginx + upstream all { + server all1.example.com:12345; + server all2.example.com:12345; + } + + upstream eu { + server eu1.example.com:12345; + server eu2.example.com:12345; + } + + upstream na { + server na1.example.com:12345; + server na2.example.com:12345; + } + ``` + +2. Add the `geoip2 {}` block with a variable of any name (for example, `$geoip2_data_continent_code`) that obtains the continent code of the GeoIP2 database: + + ```nginx + geoip2 GeoIP2/GeoLite2-Country.mmdb { + $geoip2_data_continent_code continent code; + } + + #... + ``` + +3. Create the [map](https://nginx.org/en/docs/http/ngx_http_map_module.html) block that will create the `$nearest_server` variable: + + ```nginx + #... + map $geoip2_data_continent_code $nearest_server { + default all; + EU eu; + NA na; + AS as; + AF af; + } + #... + ``` + +3. Create the [`server {}`](https://nginx.org/en/docs/http/ngx_http_core_module.html#server) block which will pass the requests to one of the upstream server groups according to the value passed in the `$nearest_server` variable: + + ```nginx + server { + listen 12346; + proxy_pass http://$nearest_server; + } + ``` + +If the continent is Europe, then the value of the `$nearest_server` will be `eu`, and the connection will be passed to the `eu` upstream via the [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) directive: + +```nginx +#... 
+
+server {
+    listen 12346;
+    proxy_pass http://$nearest_server;
+}
+
+upstream all {
+    server all1.example.com:12345;
+    server all2.example.com:12345;
+}
+
+upstream eu {
+    server eu1.example.com:12345;
+    server eu2.example.com:12345;
+}
+upstream na {
+    server na1.example.com:12345;
+    server na2.example.com:12345;
+}
+#...
+```
+
+
+### Example
+
+This example can be applied in both the [http](https://nginx.org/en/docs/http/ngx_http_core_module.html#http) and [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) contexts.
+
+```nginx
+# can be either "http {}" or "stream {}"
+#...
+geoip2 GeoIP2/GeoLite2-Country.mmdb {
+    $geoip2_data_continent_code continent code;
+}
+
+map $geoip2_data_continent_code $nearest_server {
+    default all;
+    EU      eu;
+    NA      na;
+    AS      as;
+    AF      af;
+}
+
+server {
+    listen 12346;
+    proxy_pass http://$nearest_server;
+}
+
+upstream all {
+    server all1.example.com:12345;
+    server all2.example.com:12345;
+}
+
+upstream eu {
+    server eu1.example.com:12345;
+    server eu2.example.com:12345;
+}
+
+upstream na {
+    server na1.example.com:12345;
+    server na2.example.com:12345;
+}
+```
+
+In this example, the IP address will be checked in the `GeoLite2-Country.mmdb` database, the result will be written to the `$geoip2_data_continent_code` variable. NGINX Plus will match the value of the variable against values in the [map](https://nginx.org/en/docs/http/ngx_http_map_module.html#map) directive and write the result in the custom variable, in our example `$nearest_server`. Basing on the value of the `$nearest_server`, the [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) directive will choose a corresponding upstream server. 
+ + + +## More Info + +- [GeoIP2 Dynamic Module Installation Instructions]({{< relref "geoip2.md" >}}) + +- [MaxMind GeoIP2 Databases](https://www.maxmind.com/en/geoip2-databases) + +- [MaxMind Geolite2 Free Downloadable Databases](https://dev.maxmind.com/geoip/geoip2/geolite2/) diff --git a/content/nginx/admin-guide/security-controls/controlling-access-proxied-http.md b/content/nginx/admin-guide/security-controls/controlling-access-proxied-http.md new file mode 100644 index 000000000..c6dddf763 --- /dev/null +++ b/content/nginx/admin-guide/security-controls/controlling-access-proxied-http.md @@ -0,0 +1,309 @@ +--- +description: Protect your upstream web and application servers by limiting connections, + rate of requests, or bandwidth, based on client IP address or other variables. +docs: DOCS-432 +doctypes: +- task +title: Limiting Access to Proxied HTTP Resources +toc: true +weight: 600 +--- + +This article explains how to set the maximum number of requests for a connection, or the maximum rate of downloading content from the server. + + +## Introduction + +Using NGINX and F5 NGINX Plus, it is possible to limit: + +- The number of connections per key value (for example, per IP address) +- The request rate per key value (the number of requests that are allowed to be processed during a second or minute) +- The download speed for a connection + +Note that IP addresses can be shared behind NAT devices, so limiting by IP address should be used judiciously. + + + +## Limiting the Number of Connections + +To limit the number of connections: + +1. Use the [limit_conn_zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone) directive to define the key and set the parameters of the shared memory zone (the worker processes will use this zone to share counters for key values). As the first parameter, specify the expression evaluated as a key. 
In the second parameter `zone`, specify the name of the zone and its size: + + ```nginx + limit_conn_zone $binary_remote_addr zone=addr:10m; + ``` + +2. Use the [limit_conn](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn) directive to apply the limit within the `location {}`, `server {}`, or `http {}` context. Specify the name of the shared memory zone as the first parameter, and the number of allowed connection per key as the second parameter: + + ```nginx + location /download/ { + limit_conn addr 1; + } + ``` + + The number of connections is limited on an IP address basis because the [`$binary_remote_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_binary_remote_addr) variable is used as a key. + + Another way to limit the number of connections for a given server is by using the [`$server_name`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_server_name) variable: + + ```nginx + http { + limit_conn_zone $server_name zone=servers:10m; + + server { + limit_conn servers 1000; + } + } + ``` + + + +## Limiting the Request Rate + +Rate limiting can be used to prevent DDoS attacks, or prevent upstream servers from being overwhelmed by too many requests at the same time. The method is based on the [`leaky bucket`](https://en.wikipedia.org/wiki/Leaky_bucket) algorithm: requests arrive at the bucket at various rates and leave the bucket at fixed rate. + +Before using rate limiting, you will need to configure global parameters of the "leaky bucket": + +- key - a parameter used to differentiate one client from another, generally a variable +- shared memory zone - the name and size of the zone that keeps states of these keys (the "leaky bucket") +- rate - the request rate limit specified in requests per second (`r/s`) or requests per minute (`r/m`) ("leaky bucket draining"). Requests per minute are used to specify a rate less than one request per second. 
+ +These parameters are set with the [limit_req_zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone) directive. The directive is defined on the `http {}` level - such approach allows applying different zones and request overflow parameters to different contexts: + +```nginx +http { + #... + limit_req_zone $binary_remote_addr zone=one:10m rate=1r/s; +} +``` + +With this configuration, the shared memory zone `one` with the size of 10 megabytes is created. +The zone keeps states of client IP addresses set with the [`$binary_remote_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_binary_remote_addr) variable. Note that in comparison to [`$remote_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_remote_addr) which also holds a client’s IP address, [`$binary_remote_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_binary_remote_addr) holds the binary representation of IP address which is shorter. + +The optimal size of the shared memory zone can be counted using the following data: +the size of [`$binary_remote_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_binary_remote_addr) value is 4 bytes for IPv4 addresses, +stored state occupies 128 bytes on 64-bit platforms. Thus, state information for about 16,000 IP addresses occupies 1 megabyte of the zone. + +If storage is exhausted when NGINX needs to add a new entry, it removes the oldest entry. If the space freed is still not enough to accommodate the new record, NGINX returns status code `503 Service Unavailable`. The status code can be redefined with the [limit_req_status](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_status) directive. 
+ +Once the zone is set, you can use requests limiting anywhere in the NGINX configuration with the [limit_req](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req) specified for a `server {}`, `location {}`, or `http {}` context: + +```nginx +http { + #... + + limit_req_zone $binary_remote_addr zone=one:10m rate=1r/s; + + server { + #... + + location /search/ { + limit_req zone=one; + } + } +} +``` + +With this configuration, NGINX will process no more than `1` request per second within the `/search/` location. Processing of these requests is delayed in such a way that the overall rate is not greater than specified. If the number of requests exceeds the specified rate, NGINX will delay processing of such requests until the "bucket" (shared memory zone `one`) is full. For requests that arrive at the full bucket, NGINX will respond with the `503 Service Unavailable` error (if not redefined with [limit_req_status](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_status)). + + + +### Testing the Request Rate Limit + +Before configuring real-life rate limiting, you can try the “dry run” mode that does not limit the requests processing rate. However, such excessive requests are still accounted in the shared memory zone and logged. The “dry run” mode can be enabled with the [limit_req_dry_run](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_dry_run) directive: + +```nginx +http { + #... + + limit_req_zone $binary_remote_addr zone=one:10m rate=1r/s; + + server { + #... 
+ + location /search/ { + limit_req zone=one; + limit_req_dry_run on; + } + } +} +``` + +Every request that would exceed the defined rate limit will be logged with the “dry run” mark: + +```none +2019/09/03 10:28:45 [error] 142#142: *13246 limiting requests, dry run, excess: 1.000 by zone "one", client: 172.19.0.1, server: www.example.com, request: "GET / HTTP/1.0", host: "www.example.com:80" +``` + + + +### Handling Excessive Requests + +Requests are limited to fit the rate defined in the [limit_req_zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone) directive. If the number of requests exceeds the specified rate and the shared memory zone becomes full, NGINX will respond with an error. As traffic tends to be bursty, returning the error in response to a client request during traffic bursts is not the best case. + +Such excessive requests in NGINX can be buffered and processed. The `burst` parameter of the [limit_req](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req) directive sets the maximum number of excessive requests that await to be processed at the specified rate: + +```nginx +http { + #... + + limit_req_zone $binary_remote_addr zone=one:10m rate=1r/s; + + server { + #... + + location /search/ { + limit_req zone=one burst=5; + } + } +} +``` + +With this configuration, if request rate exceeds `1` request per second, requests beyond the rate will be put into the zone `one`. When the zone is full, excessive requests will be queued (`burst`), the size of this queue is `5` requests. Request processing in the queue is delayed in such a way that the overall rate is not greater than specified. Requests above the burst limit will be rejected with the `503` error. + +If delaying of request is not desired during traffic burst, add the `nodelay` parameter: + +```nginx +http { + #... + + limit_req_zone $binary_remote_addr zone=one:10m rate=1r/s; + + server { + #... 
+ + location /search/ { + limit_req zone=one burst=5 nodelay; + } + } +} +``` + +With this configuration, excessive requests within the `burst` limit will be served immediately regardless of the specified `rate`, requests above the burst limit will be rejected with the `503` error. + + + +### Delaying Excessive Requests + +Another way to handle excessive requests is to serve some number of these requests without delay, then apply rate limiting up to the point when excessive requests will be rejected. + +This can be achieved with the `delay` and `burst` parameters. The `delay` parameter defines the point at which excessive requests are delayed to comply with the defined rate limit: + +```nginx +http { + #... + + limit_req_zone $binary_remote_addr zone=one:10m rate=1r/s; + + server { + #... + + location /search/ { + limit_req zone=one burst=5 delay=3; + } + } +} +``` + +With this configuration, first 3 requests (`delay`) are passed without delay, next 2 requests (`burst` - `delay`) are delayed in such a way that the overall rate is not greater than specified, further excessive requests will be rejected because the total burst size has been exceeded, subsequent requests will be delayed. 
+ + + +### Synchronizing Contents of Many Shared Memory Zones + +If you have a computer cluster with several NGINX instances and these instances use the `limit_req` method, it is possible to sync the contents of their shared memory zones on conditions that: + +- the [zone_sync](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync) functionality is configured for each instance +- shared memory zones set in the [limit_req_zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone) directive for each instance have the same name +- the [sync](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone_sync) parameter of the [limit_req_zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone) directive is specified for each instance: + +```nginx +http { + #... + limit_req_zone $binary_remote_addr zone=one:10m rate=1r/s sync; +} +``` + +See [Runtime State Sharing in a Cluster]({{< relref "../high-availability/zone_sync.md" >}}) for details. + + + +## Limiting the Bandwidth + +To limit the bandwidth per connection, use the [limit_rate](https://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate) directive: + +```nginx +location /download/ { + limit_rate 50k; +} +``` + +With this setting a client will be able to download content through a single connection at a maximum speed of `50` kilobytes per second. However, the client can open several connections. So if the goal is to prevent a speed of downloading greater than the specified value, the number of connections should also be limited. For example, one connection per IP address (if the shared memory zone specified above is used): + +```nginx +location /download/ { + limit_conn addr 1; + limit_rate 50k; +} +``` + +To impose the limit only after the client downloads a certain amount of data, use the [limit_rate_after](https://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate_after) directive. 
It may be reasonable to allow a client to quickly download a certain amount of data (for example, a file header — film index) and limit the rate for downloading the rest of the data (to make users watch a film, not download). 
+
+```nginx
+limit_rate_after 500k;
+limit_rate 20k;
+```
+
+The following example shows the combined configuration for limiting the number of connections and the bandwidth. The maximum allowed number of connections is set to `5` connections per client address, which fits most common cases since modern browsers typically open up to 3 connections at a time. Meanwhile the location that serves downloads allows only one connection:
+
+```nginx
+http {
+    limit_conn_zone $binary_remote_addr zone=addr:10m;
+
+    server {
+        root /www/data;
+        limit_conn addr 5;
+
+        location / {
+        }
+
+        location /download/ {
+            limit_conn addr 1;
+            limit_rate_after 1m;
+            limit_rate 50k;
+        }
+    }
+}
+```
+
+
+
+### Dynamic Bandwidth Control
+
+The [limit_rate](https://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate) value can also be specified as a variable - this enables dynamic bandwidth use cases, for example, allow a higher bandwidth limit to modern browsers:
+
+```nginx
+map $ssl_protocol $response_rate {
+    "TLSv1.1" 10k;
+    "TLSv1.2" 100k;
+    "TLSv1.3" 1000k;
+}
+
+server {
+    listen 443 ssl;
+    ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3;
+    ssl_certificate www.example.com.crt;
+    ssl_certificate_key www.example.com.key;
+
+    location / {
+        limit_rate $response_rate; # Limit bandwidth based on TLS version
+        limit_rate_after 512; # Apply limit after headers have been sent
+        proxy_pass http://my_backend;
+    }
+}
+```
+
+
+## See Also
+
+- [Rate Limiting with NGINX and NGINX Plus](https://www.nginx.com/blog/rate-limiting-nginx/)
diff --git a/content/nginx/admin-guide/security-controls/controlling-access-proxied-tcp.md b/content/nginx/admin-guide/security-controls/controlling-access-proxied-tcp.md
new file mode 100644
index 000000000..12e2cc6a3
--- /dev/null
+++ 
b/content/nginx/admin-guide/security-controls/controlling-access-proxied-tcp.md @@ -0,0 +1,94 @@ +--- +description: Protect your upstream TCP application servers by limiting connections + or bandwidth, based on client IP address or other variables. +docs: DOCS-433 +doctypes: +- task +title: Restricting Access to Proxied TCP Resources +toc: true +weight: 700 +--- + +This chapter provides scenarios for restricting access to a database or media server that communicates over TCP. Access can be limited by IP address, the number of simultaneous connections, or bandwidth. + + +## Restricting Access by IP Address + +NGINX can allow or deny access based on a particular IP address or the range of IP addresses of client computers. To allow or deny access, use the [allow](https://nginx.org/en/docs/stream/ngx_stream_access_module.html#allow) and [deny](https://nginx.org/en/docs/stream/ngx_stream_access_module.html#deny) directives inside the [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) context or a [server](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#server) block: + +```nginx +stream { + #... + server { + listen 12345; + deny 192.168.1.2; + allow 192.168.1.1/24; + allow 2001:0db8::/32; + deny all; + } +} +``` + +The rules are processed in sequence, from top to bottom: if the first directive in the sequence is `deny all`, then all further `allow` directives have no effect. In this example, the subnet `192.168.1.1/24` is allowed access, with the exception of `192.168.1.2`. The `2001:0db8::/32` range of IPv6 addresses is also allowed, and access to any other IP addresses is denied. + + +## Limiting the Number of TCP Connections + +You can limit the number of simultaneous TCP connections from one IP address. This can be useful in preventing denial-of-service (DoS) attacks. + +First, let’s define the _zone_ that will store the maximum number of TCP connections to one server, and a key to identify the connection. 
This can be done with the [limit_conn_zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone) directive in the [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) context: + +```nginx +stream { + #... + limit_conn_zone $binary_remote_addr zone=ip_addr:10m; + #... +} +``` + +The key that identifies the connection is defined as `$binary_remote_addr`, which represents the IP address of the client in binary format. The name of the shared memory zone is `ip_addr` and the zone size is 10 megabytes. + +After the zone is defined, limit connections with the [limit_conn](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn) directive. Its first parameter specifies the name of the shared memory zone previously defined by [limit_conn_zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone). As the second parameter, specify the maximum number of allowed connections for each IP address, in either the [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) context or a [server](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#server) block (as in this example, which also shows the prerequisite `limit_conn_zone` directive): + +```nginx +stream { + #... + limit_conn_zone $binary_remote_addr zone=ip_addr:10m; + + server { + #... + limit_conn ip_addr 1; + } +} +``` + +When limiting the number of connections per IP address, be aware that multiple hosts behind a Network Address Translation (NAT) device share the same IP address. + + +## Limiting the Bandwidth + +You can configure the maximum download or upload speed for TCP connections. Include the [proxy_download_rate](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_download_rate) or [proxy_upload_rate](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_upload_rate) directive, respectively: + +```nginx +server { + #... 
+ proxy_download_rate 100k; + proxy_upload_rate 50k; +} +``` + +With these settings a client can download data through a single connection at a maximum speed of 100 kilobytes per second, and upload data through a single connection at a maximum speed of 50 kilobytes per second. However, the client can open several connections. So if the goal is to limit overall speed of loading for each client, the number of connections must also be limited to `1` as described in the previous section. + +```nginx +stream { + #... + limit_conn_zone $binary_remote_addr zone=ip_addr:10m; + + server { + #... + limit_conn ip_addr 1; + proxy_download_rate 100k; + proxy_upload_rate 50k; + } +} +``` diff --git a/content/nginx/admin-guide/security-controls/denylisting-ip-addresses.md b/content/nginx/admin-guide/security-controls/denylisting-ip-addresses.md new file mode 100644 index 000000000..49fb47d05 --- /dev/null +++ b/content/nginx/admin-guide/security-controls/denylisting-ip-addresses.md @@ -0,0 +1,232 @@ +--- +description: Control access to your site or apps from specific client IP addresses, + using dynamic denylists built with the F5 NGINX Plus key-value store and API. +docs: DOCS-434 +doctypes: +- task +title: Dynamic Denylisting of IP Addresses +toc: true +weight: 1100 +--- + +This section describes how to create a denylist or allowlist of specific client IP addresses, which denies or allows them access to your site, and how to dynamically maintain the list of addresses. + + +## Overview + +In F5 NGINX Plus Release 13 (R13) and later, you can denylist some IP addresses as well as create and maintain a database of denylisted IP addresses. You can also explicitly allowlist other IP addresses. The IP addresses database is managed with the NGINX Plus API and keyval modules. + +NGINX Plus Release 19 (R19) extends this capability by matching an IP address to any address within a subnet or network range. 
+ + + +## Prerequisites + +NGINX Plus Release 13 and later, NGINX Plus Release 19 and later for network ranges support. + + +## Setup + +First, enable the database for storing the list of denylisted and allowlisted IP addresses. + +1. In NGINX Plus configuration file, include the [keyval_zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone) directive in the [http](https://nginx.org/en/docs/http/ngx_http_core_module.html#http) context to create a memory zone for storing keys and values. This sample directive creates a 1‑MB zone called **one**. + + ```nginx + http { + # ... + keyval_zone zone=one:1m; + } + ``` + + To perform matching of an IP address against subnets (for example, `192.168.13.0/24`), specify the `type=ip` parameter of the [keyval_zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone) directive: + + ```nginx + http { + # ... + keyval_zone zone=one:1m type=ip; + } + ``` + + Note that the size of [keyval_zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone) should also be increased as the `type=ip` parameter also enables an extra index stored in the zone. + + You can optionally include the `state` parameter to create a file where the key‑value database is stored and so persists across NGINX Plus reloads and restarts; in this example, **one.keyval**: + + ```nginx + keyval_zone zone=one:1m state=one.keyval; + ``` + +2. Enable the NGINX Plus API in read‑write mode with the [api](https://nginx.org/en/docs/http/ngx_http_api_module.html#api) directive: + + ```nginx + # ... 
+ server { + listen 80; + server_name www.example.com; + + location /api { + api write=on; + } + } + ``` + + We strongly recommend [restricting access]({{< relref "/nginx/admin-guide/security-controls/controlling-access-proxied-http.md" >}}) to this location, for example by allowing access only from `localhost` (`127.0.0.1`), and by using HTTP basic authentication to restrict use of the `PATCH`, `POST`, and `DELETE` methods to a specified set of users: + + ```nginx + # ... + server { + listen 80; + server_name www.example.com; + + location /api { + api write=on; + + allow 127.0.0.1; + deny all; + + limit_except GET { + auth_basic "NGINX Plus API"; + auth_basic_user_file /path/to/passwd/file; + } + } + } + ``` + +3. Populate the key‑value database with the API's [POST](https://nginx.org/en/docs/http/ngx_http_api_module.html#postHttpKeyvalZoneData) method, supplying the data in JSON format. You can use the `curl` command as in the following example. If the zone is empty, you can enter several key‑value pairs at once; otherwise, pairs must be added one at a time. + + ```shell + $ curl -X POST -d '{ + "10.0.0.1": "1", + "10.0.0.2": "1", + "10.0.0.3": "0", + "10.0.0.4": "0" + }' -s http://www.example.com/api/6/http/keyvals/one + ``` + + If you have specified matching of IP addresses against network ranges (with the `type=ip` parameter of the [keyval_zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone) directive), send the `POST` command with the network range specified in CIDR notation: + + ```shell + $ curl -X POST -d '{ + "192.168.13.0/24": "1" + }' -s http://www.example.com/api/6/http/keyvals/one + ``` + +4. Define how client IP addresses are evaluated against the key‑value database, by including the [keyval](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval) directive in the [http](https://nginx.org/en/docs/http/ngx_http_core_module.html#http) context. 
+
+    The directive takes advantage of the standard NGINX and NGINX Plus variable [`$remote_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_remote_addr), which is set to the client IP address automatically for every request.
+
+    As it processes each request, NGINX Plus:
+
+    - Looks up the first parameter (here, `$remote_addr`, preset to the client's IP address) in the key‑value database specified by the `zone=` parameter (here, **one**).
+
+    - If a key in the database exactly matches `$remote_addr`, sets the second parameter (here, `$target`) to the value corresponding to the key. In our example, the value is `1` for denylisted addresses or `0` for allowlisted addresses.
+
+    ```nginx
+    http {
+        # ...
+        keyval_zone zone=one:1m type=ip state=one.keyval;
+        keyval $remote_addr $target zone=one; # Client address is the key,
+                                              # $target is the value;
+    }
+    ```
+
+5. Create a rule with the [if](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#if) directive that either allows or denies access depending on the client IP address. With this rule, access is allowed when `$target` is `0` and denied when it is `1`:
+
+    ```nginx
+    if ($target) {
+        return 403;
+    }
+    ```
+
+
+## Managing the Key-Value Database
+
+You can use API methods to update a key‑value database dynamically, without requiring a reload of NGINX Plus.
+
+All of the following examples operate on the **one** zone, which is accessible at **http://www.example.com/api/6/http/keyvals/one**.
+
+- To get the list of all database entries for a zone:
+
+  ```shell
+  curl -X GET 'http://www.example.com/api/6/http/keyvals/one'
+  ```
+
+
+- To update the value for an existing entry (in this example to change the access status for IP address `10.0.0.4` from allowlisted to denylisted):
+
+  ```shell
+  curl -X PATCH -d '{"10.0.0.4": "1"}' -s 'http://www.example.com/api/6/http/keyvals/one'
+  ```
+
+- To add an entry to a populated zone:
+
+  ```shell
+  curl -X POST -d '{"10.0.0.5": "1"}' -s 'http://www.example.com/api/6/http/keyvals/one'
+  ```
+
+- To delete an entry:
+
+  ```shell
+  curl -X PATCH -d '{"10.0.0.4":null}' -s 'http://www.example.com/api/6/http/keyvals/one'
+  ```
+
+
+
+## Full Example
+
+The full NGINX Plus configuration:
+
+```nginx
+http {
+    # ...
+    keyval_zone zone=one:1m type=ip state=one.keyval;
+    keyval $remote_addr $target zone=one;
+
+    server {
+        listen 80;
+        server_name www.example.com;
+
+        location /api {
+            api write=on;
+
+            allow 127.0.0.1;
+            deny all;
+
+            limit_except GET {
+                auth_basic "NGINX Plus API";
+                auth_basic_user_file /path/to/passwd/file;
+            }
+        }
+
+        if ($target) {
+            return 403;
+        }
+    }
+}
+```
+
+This configuration:
+
+- Creates a 1 MB keyval zone **one** that accepts network ranges and also creates the file **one.keyval** to make the database of key‑value pairs persist across reloads and restarts of NGINX Plus.
+
+- Enables the NGINX Plus API in write mode so that the zone can be populated with IP addresses.
+
+- Enables lookup of the IP address `$remote_addr` in the key-value database as the key, and puts the value of the found key into the `$target` variable.
+
+- Enables a simple rule to check for the resulting value: if the value of `$target` is `1` (address is denylisted), return `403 (Forbidden)` to the client.
+
+
+The following `curl` command populates the empty keyval zone **one** with IP addresses that are denylisted (value is `1`) or allowlisted (value is `0`):
+
+```shell
+curl -X POST -d '{
+    "10.0.0.1": "1",
+    "192.168.13.0/24": "1",
+    "10.0.0.3": "0",
+    "10.0.0.4": "0"
+}' -s 'http://www.example.com/api/6/http/keyvals/one'
+```
+
+
+## See Also
+
+- [Dynamic IP Denylisting with NGINX Plus and fail2ban](https://www.nginx.com/blog/dynamic-ip-denylisting-with-nginx-plus-and-fail2ban/)
diff --git a/content/nginx/admin-guide/security-controls/securing-http-traffic-upstream.md b/content/nginx/admin-guide/security-controls/securing-http-traffic-upstream.md
new file mode 100644
index 000000000..21d962392
--- /dev/null
+++ b/content/nginx/admin-guide/security-controls/securing-http-traffic-upstream.md
@@ -0,0 +1,183 @@
+---
+description: Secure HTTP traffic between NGINX or F5 NGINX Plus and upstream servers,
+  using SSL/TLS encryption.
+docs: DOCS-435
+doctypes:
+- task
+title: Securing HTTP Traffic to Upstream Servers
+toc: true
+weight: 900
+---
+
+This article explains how to encrypt HTTP traffic between NGINX and an upstream group or a proxied server.
+
+
+## Prerequisites
+
+- [NGINX Open Source](https://nginx.org/en/download.html) or [F5 NGINX Plus](https://nginx.com/products/)
+- A [proxied server]({{< relref "../web-server/reverse-proxy.md" >}}) or an [upstream group of servers]({{< relref "../load-balancer/http-load-balancer.md" >}})
+- SSL certificates and a private key
+
+## Obtaining SSL Server Certificates
+
+You can purchase a server certificate from a trusted certificate authority (CA), or you can create your own internal CA with an [OpenSSL](https://www.openssl.org/) library and generate your own certificate. The server certificate together with a private key should be placed on each upstream server.
+
+
+## Obtaining an SSL Client Certificate
+
+NGINX will identify itself to the upstream servers by using an SSL client certificate. 
This client certificate must be signed by a trusted CA and is configured on NGINX together with the corresponding private key.
+
+You will also need to configure the upstream servers to require client certificates for all incoming SSL connections, and to trust the CA that issued NGINX’s client certificate. Then, when NGINX connects to the upstream, it will provide its client certificate and the upstream server will accept it.
+
+
+## Configuring NGINX
+
+First, change the URL to an upstream group to support SSL connections. In the NGINX configuration file, specify the “`https`” protocol for the proxied server or an upstream group in the [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) directive:
+
+```nginx
+location /upstream {
+    proxy_pass https://backend.example.com;
+}
+```
+
+Add the client certificate and the key that will be used to authenticate NGINX on each upstream server with [proxy_ssl_certificate](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate) and [proxy_ssl_certificate_key](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate_key) directives:
+
+```nginx
+location /upstream {
+    proxy_pass https://backend.example.com;
+    proxy_ssl_certificate /etc/nginx/client.pem;
+    proxy_ssl_certificate_key /etc/nginx/client.key;
+}
+```
+
+If you use a self-signed certificate for an upstream or your own CA, also include the [proxy_ssl_trusted_certificate](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_trusted_certificate). The file must be in the PEM format. Optionally, include the [proxy_ssl_verify](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_verify) and [proxy_ssl_verify_depth](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_verify_depth) directives to have NGINX check the validity of the security certificates:
+
+```nginx
+location /upstream {
+    #... 
+ proxy_ssl_trusted_certificate /etc/nginx/trusted_ca_cert.crt; + proxy_ssl_verify on; + proxy_ssl_verify_depth 2; + #... +} +``` + +Each new SSL connection requires a full SSL handshake between the client and server, which is quite CPU-intensive. To have NGINX proxy previously negotiated connection parameters and use a so-called abbreviated handshake, include the [proxy_ssl_session_reuse](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_session_reuse) directive: + +```nginx +location /upstream { + #... + proxy_ssl_session_reuse on; + #... +} +``` + +Optionally, you can specify which SSL protocols and ciphers are used: + +```nginx +location /upstream { + #... + proxy_ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + proxy_ssl_ciphers HIGH:!aNULL:!MD5; +} +``` + + +## Configuring Upstream Servers + +Each upstream server should be configured to accept HTTPS connections. For each upstream server, specify a path to the server certificate and the private key with [ssl_certificate](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate) and [ssl_certificate_key](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate_key) directives: + +```nginx +server { + listen 443 ssl; + server_name backend1.example.com; + + ssl_certificate /etc/ssl/certs/server.crt; + ssl_certificate_key /etc/ssl/certs/server.key; + #... + location /yourapp { + proxy_pass https://url_to_app.com; + #... + } +} +``` + +Specify the path to a client certificate with the [ssl_client_certificate](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_client_certificate) directive: + +```nginx +server { + #... + ssl_client_certificate /etc/ssl/certs/ca.crt; + ssl_verify_client optional; + #... +} +``` + + +## Complete Example + +```nginx +http { + #... + upstream backend.example.com { + server backend1.example.com:443; + server backend2.example.com:443; + } + + server { + listen 80; + server_name www.example.com; + #... 
+
+        location /upstream {
+            proxy_pass https://backend.example.com;
+            proxy_ssl_certificate /etc/nginx/client.pem;
+            proxy_ssl_certificate_key /etc/nginx/client.key;
+            proxy_ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
+            proxy_ssl_ciphers HIGH:!aNULL:!MD5;
+            proxy_ssl_trusted_certificate /etc/nginx/trusted_ca_cert.crt;
+
+            proxy_ssl_verify on;
+            proxy_ssl_verify_depth 2;
+            proxy_ssl_session_reuse on;
+        }
+    }
+
+    server {
+        listen 443 ssl;
+        server_name backend1.example.com;
+
+        ssl_certificate /etc/ssl/certs/server.crt;
+        ssl_certificate_key /etc/ssl/certs/server.key;
+        ssl_client_certificate /etc/ssl/certs/ca.crt;
+        ssl_verify_client optional;
+
+        location /yourapp {
+            proxy_pass https://url_to_app.com;
+            #...
+        }
+    }
+
+    server {
+        listen 443 ssl;
+        server_name backend2.example.com;
+
+        ssl_certificate /etc/ssl/certs/server.crt;
+        ssl_certificate_key /etc/ssl/certs/server.key;
+        ssl_client_certificate /etc/ssl/certs/ca.crt;
+        ssl_verify_client optional;
+
+        location /yourapp {
+            proxy_pass https://url_to_app.com;
+            #...
+        }
+    }
+}
+```
+
+In this example, the “`https`” protocol in the [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) directive specifies that the traffic forwarded by NGINX to upstream servers be secured.
+
+When a secure connection is passed from NGINX to the upstream server for the first time, the full handshake process is performed. 
The [proxy_ssl_certificate](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate) directive defines the location of the PEM-format certificate required by the upstream server, the [proxy_ssl_certificate_key](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate_key) directive defines the location of the certificate’s private key, and the [proxy_ssl_protocols](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_protocols) and [proxy_ssl_ciphers](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_ciphers) directives control which protocols and ciphers are used.
+
+The next time NGINX passes a connection to the upstream server, session parameters will be reused because of the [proxy_ssl_session_reuse](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_session_reuse) directive, and the secured connection is established faster.
+
+The trusted CA certificates in the file named by the [proxy_ssl_trusted_certificate](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_trusted_certificate) directive are used to verify the certificate on the upstream. The [proxy_ssl_verify_depth](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_verify_depth) directive specifies that two certificates in the certificates chain are checked, and the [proxy_ssl_verify](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_verify) directive verifies the validity of certificates.
diff --git a/content/nginx/admin-guide/security-controls/securing-tcp-traffic-upstream.md b/content/nginx/admin-guide/security-controls/securing-tcp-traffic-upstream.md
new file mode 100644
index 000000000..de0d4b3e3
--- /dev/null
+++ b/content/nginx/admin-guide/security-controls/securing-tcp-traffic-upstream.md
@@ -0,0 +1,120 @@
+---
+description: Secure TCP traffic between NGINX or F5 NGINX Plus and upstream servers,
+  using SSL/TLS encryption. 
+docs: DOCS-436
+doctypes:
+- task
+title: Securing TCP Traffic to Upstream Servers
+toc: true
+weight: 1000
+---
+
+This article explains how to secure TCP traffic between NGINX and a TCP upstream server or an upstream group of TCP servers.
+
+## Prerequisites
+
+- F5 NGINX Plus [R6]({{< relref "../../releases.md" >}}) and later or the latest NGINX Open Source compiled with the `--with-stream` and `--with-stream_ssl_module` configuration parameters
+- A proxied TCP server or an [upstream group of TCP servers]({{< relref "../load-balancer/tcp-udp-load-balancer.md" >}})
+- SSL certificates and a private key
+
+## Obtaining SSL Server Certificates
+
+First, you will need to get server certificates and a private key and put them on the upstream server or on each server in the upstream group. A certificate can be obtained from a trusted certificate authority (CA) or generated using an SSL library such as [OpenSSL](http://www.openssl.org/).
+
+Self-signed server certificates are used when you need to encrypt the connection between NGINX and the upstream server. However, these connections are vulnerable to a man-in-the-middle attack: an imposter can impersonate the upstream server and NGINX will not know it is talking to a fake server. If you obtain server certificates that have been signed by a trusted CA (you can create your own internal CA using OpenSSL), you can then configure NGINX to only trust certificates that have been signed by that CA. This makes it much more difficult for an attacker to impersonate an upstream server.
+
+## Obtaining an SSL Client Certificate
+
+NGINX can identify itself to the upstream servers by using an SSL Client Certificate. This client certificate must be signed by a trusted CA and stored on NGINX along with the corresponding private key.
+
+You will need to configure the upstream servers to require client certificates for all incoming SSL connections and to trust the CA that issued the client certificate to NGINX. 
Then, when NGINX connects to the upstream, it will provide its client certificate and the upstream server will accept it.
+
+## Configuring NGINX
+
+In the NGINX configuration file, include the [proxy_ssl](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl) directive in the `server` block on the `stream` level:
+
+```nginx
+stream {
+    server {
+        ...
+        proxy_pass backend;
+        proxy_ssl on;
+    }
+}
+```
+
+Then specify the path to the SSL client certificate required by the upstream server and the certificate’s private key:
+
+```nginx
+server {
+    ...
+    proxy_ssl_certificate /etc/ssl/certs/backend.crt;
+    proxy_ssl_certificate_key /etc/ssl/certs/backend.key;
+}
+```
+
+Optionally, you can specify which SSL protocols and ciphers are used:
+
+```nginx
+server {
+    ...
+    proxy_ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
+    proxy_ssl_ciphers HIGH:!aNULL:!MD5;
+}
+```
+
+If you use certificates issued by a CA, also include the [proxy_ssl_trusted_certificate](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_trusted_certificate) directive to name the file containing the trusted CA certificates used to verify the upstream’s security certificates. The file must be in the PEM format. Optionally, include the [proxy_ssl_verify](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_verify) and [proxy_ssl_verify_depth](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_verify_depth) directives to have NGINX check the validity of the security certificates:
+
+```nginx
+server {
+    ...
+    proxy_ssl_trusted_certificate /etc/ssl/certs/trusted_ca_cert.crt;
+    proxy_ssl_verify on;
+    proxy_ssl_verify_depth 2;
+}
+```
+
+Each new SSL connection requires a full SSL handshake between the client and server, which is quite CPU-intensive. 
To have NGINX proxy previously negotiated connection parameters and use a so-called abbreviated handshake, include the [proxy_ssl_session_reuse](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_session_reuse) directive: + +```nginx +proxy_ssl_session_reuse on; +``` + +## Complete Example + +```nginx +stream { + + upstream backend { + server backend1.example.com:12345; + server backend2.example.com:12345; + server backend3.example.com:12345; + } + + server { + listen 12345; + proxy_pass backend; + proxy_ssl on; + + proxy_ssl_certificate /etc/ssl/certs/backend.crt; + proxy_ssl_certificate_key /etc/ssl/certs/backend.key; + proxy_ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + proxy_ssl_ciphers HIGH:!aNULL:!MD5; + proxy_ssl_trusted_certificate /etc/ssl/certs/trusted_ca_cert.crt; + + proxy_ssl_verify on; + proxy_ssl_verify_depth 2; + proxy_ssl_session_reuse on; + } +} +``` + +In this example, the [proxy_ssl](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl) directive specifies that TCP traffic forwarded by NGINX to upstream servers be secured. + +When a secure TCP connection is passed from NGINX to the upstream server for the first time, the full handshake process is performed. The upstream server asks NGINX to present a security certificate specified in the [proxy_ssl_certificate](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_certificate) directive. The [proxy_ssl_protocols](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_protocols) and [proxy_ssl_ciphers](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_ciphers) directives control which protocols and ciphers are used. + +The next time NGINX passes a connection to the upstream, session parameters will be reused because of the [proxy_ssl_session_reuse](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_session_reuse) directive, and the secured TCP connection is established faster. 
+ +The trusted CA certificates in the file named by the [proxy_ssl_trusted_certificate](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_trusted_certificate) directive are used to verify the certificate on the upstream server. The [proxy_ssl_verify_depth](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_verify_depth) directive specifies that two certificates in the certificates chain are checked, and the [proxy_ssl_verify](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_verify) directive verifies the validity of certificates. + +To learn more about NGINX Plus, please see our [commercial subscriptions](https://nginx.com/products/). diff --git a/content/nginx/admin-guide/security-controls/terminating-ssl-http.md b/content/nginx/admin-guide/security-controls/terminating-ssl-http.md new file mode 100644 index 000000000..cae572dc6 --- /dev/null +++ b/content/nginx/admin-guide/security-controls/terminating-ssl-http.md @@ -0,0 +1,326 @@ +--- +description: Terminate HTTPS traffic from clients, relieving your upstream web and + application servers of the computational load of SSL/TLS encryption. +docs: DOCS-437 +doctypes: +- task +title: NGINX SSL Termination +toc: true +weight: 100 +--- + +This section describes how to configure an HTTPS server on NGINX and F5 NGINX Plus. + + +## Setting up an HTTPS Server + +To set up an HTTPS server, in your **nginx.conf** file include the `ssl` parameter to the [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen) directive in the [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server) block, then specify the locations of the server certificate and private key files: + +```nginx +server { + listen 443 ssl; + server_name www.example.com; + ssl_certificate www.example.com.crt; + ssl_certificate_key www.example.com.key; + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers HIGH:!aNULL:!MD5; + #... 
+} +``` + +The server certificate is a public entity. It is sent to every client that connects to the NGINX or NGINX Plus server. The private key is a secure entity and should be stored in a file with restricted access. However, the NGINX master process must be able to read this file. Alternatively, the private key can be stored in the same file as the certificate: + +```nginx +ssl_certificate www.example.com.cert; +ssl_certificate_key www.example.com.cert; +``` + +In this case it is important to restrict access to the file. Note that although the certificate and the key are stored in one file in this case, only the certificate is sent to clients. + +The [ssl_protocols](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols) and [ssl_ciphers](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers ) directives can be used to require that clients use only the strong versions and ciphers of SSL/TLS when establishing connections. + +Since version 1.9.1, NGINX uses these defaults: + +```nginx +ssl_protocols TLSv1 TLSv1.1 TLSv1.2; +ssl_ciphers HIGH:!aNULL:!MD5; +``` + +Vulnerabilities are sometimes found in the design of older ciphers, and we recommend disabling them in a modern NGINX configuration (unfortunately, the default configuration cannot easily be changed because of backward compatibility for existing NGINX deployments). Please note that CBC-mode ciphers might be vulnerable to a number of attacks (the BEAST attack in particular as described in [CVE-2011-3389](https://nvd.nist.gov/vuln/detail/CVE-2011-3389)), and we recommend not using SSLv3 due to the [POODLE](https://nvd.nist.gov/vuln/detail/CVE-2014-3566) attack, unless you need to support legacy clients. + + + +### OCSP Validation of Client Certificates + +NGINX can be configured to use Online Certificate Status Protocol (OCSP) to check the validity of X.509 client certificates as they are presented. 
An OCSP request for the client certificate status is sent to an OCSP responder which checks the certificate validity and returns the response with the certificate status:
+
+- `Good` - the certificate is not revoked
+- `Revoked` - the certificate is revoked
+- `Unknown` - no information is available about the client certificate
+
+To enable OCSP validation of SSL client certificates, specify the [ssl_ocsp](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ocsp) directive along with the [ssl_verify_client](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_verify_client) directive, which enables certificate verification:
+
+```nginx
+server {
+    listen 443 ssl;
+
+    ssl_certificate /etc/ssl/foo.example.com.crt;
+    ssl_certificate_key /etc/ssl/foo.example.com.key;
+
+    ssl_verify_client on;
+    ssl_trusted_certificate /etc/ssl/cachain.pem;
+    ssl_ocsp on; # Enable OCSP validation
+
+    #...
+}
+```
+
+NGINX sends the OCSP request to the OCSP URI embedded in the client certificate unless a different URI is defined with the [ssl_ocsp_responder](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ocsp_responder) directive. Only `http://` OCSP responders are supported:
+
+```nginx
+#...
+ssl_ocsp_responder http://ocsp.example.com/;
+#...
+```
+
+To cache OCSP responses in a single memory zone shared by all worker processes, specify the [ssl_ocsp_cache](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ocsp_cache) directive to define the name and size of the zone. Responses are cached for `1` hour unless the `nextUpdate` value in the OCSP response specifies a different value:
+
+```nginx
+#...
+ssl_ocsp_cache shared:one:10m;
+#...
+```
+
+The result of the client certificate validation is available in the [`$ssl_client_verify`](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#var_ssl_client_verify) variable, including the reason for OCSP failure.
+
+
+## HTTPS Server Optimization
+
+SSL operations consume extra CPU resources. 
The most CPU-intensive operation is the SSL handshake. There are two ways to minimize the number of these operations per client: + +- Enabling keepalive connections to send several requests via one connection +- Reusing SSL session parameters to avoid SSL handshakes for parallel and subsequent connections + +Sessions are stored in the SSL session cache shared between worker processes and configured by the [ssl_session_cache](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache) directive. One megabyte of cache contains about 4000 sessions. The default cache timeout is 5 minutes. This timeout can be increased using the [ssl_session_timeout](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_timeout) directive. Below is a sample configuration optimized for a multi-core system with 10 megabyte shared session cache: + +```nginx +worker_processes auto; + +http { + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + + server { + listen 443 ssl; + server_name www.example.com; + keepalive_timeout 70; + + ssl_certificate www.example.com.crt; + ssl_certificate_key www.example.com.key; + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers HIGH:!aNULL:!MD5; + #... + } +} +``` + + +## SSL Certificate Chains + +Some browsers may complain about a certificate signed by a well-known certificate authority, while other browsers may accept the certificate without issues. This occurs because the issuing authority has signed the server certificate using an intermediate certificate that is not present in the base of well-known trusted certificate authorities which is distributed in a particular browser. In this case the authority provides a bundle of chained certificates that should be concatenated to the signed server certificate. 
The server certificate must appear before the chained certificates in the combined file: + +```shell +cat www.example.com.crt bundle.crt > www.example.com.chained.crt +``` + +The resulting file should be used in the [ssl_certificate](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate) directive: + +```nginx +server { + listen 443 ssl; + server_name www.example.com; + ssl_certificate www.example.com.chained.crt; + ssl_certificate_key www.example.com.key; + #... +} +``` + +If the server certificate and the bundle have been concatenated in the wrong order, NGINX fails to start and displays the following error message: + +```none +SSL_CTX_use_PrivateKey_file(" ... /www.example.com.key") failed + (SSL: error:0B080074:x509 certificate routines: + X509_check_private_key:key values mismatch) +``` + +The error happens because NGINX has tried to use the private key with the bundle’s first certificate instead of the server certificate. + +Browsers usually store intermediate certificates which they receive and are signed by trusted authorities. So actively used browsers may already have the required intermediate certificates and may not complain about a certificate sent without a chained bundle. To ensure the server sends the complete certificate chain the openssl command-line utility may be used: + +```shell +openssl s_client -connect www.godaddy.com:443 +... +Certificate chain + 0 s:/C=US/ST=Arizona/L=Scottsdale/1.3.6.1.4.1.311.60.2.1.3=US + /1.3.6.1.4.1.311.60.2.1.2=AZ/O=GoDaddy.com, Inc + /OU=MIS Department/CN=www.GoDaddy.com + /serialNumber=0796928-7/2.5.4.15=V1.0, Clause 5.(b) + i:/C=US/ST=Arizona/L=Scottsdale/O=GoDaddy.com, Inc. + /OU=http://certificates.godaddy.com/repository + /CN=Go Daddy Secure Certification Authority + /serialNumber=07969287 + 1 s:/C=US/ST=Arizona/L=Scottsdale/O=GoDaddy.com, Inc. 
+ /OU=http://certificates.godaddy.com/repository + /CN=Go Daddy Secure Certification Authority + /serialNumber=07969287 + i:/C=US/O=The Go Daddy Group, Inc. + /OU=Go Daddy Class 2 Certification Authority + 2 s:/C=US/O=The Go Daddy Group, Inc. + /OU=Go Daddy Class 2 Certification Authority + i:/L=ValiCert Validation Network/O=ValiCert, Inc. + /OU=ValiCert Class 2 Policy Validation Authority + /CN=http://www.valicert.com//emailAddress=info@valicert.com +... +``` + +In this example the subject (“`s`”) of the `www.GoDaddy.com` server certificate `#0` is signed by an issuer (`“i”`) which itself is the subject of certificate #1. Certificate #1 is signed by an issuer which itself is the subject of certificate #2. This certificate, however, is signed by the well‑known issuer `ValiCert, Inc.` whose certificate is stored in the browsers themselves. + +If a certificate bundle has not been added, only the server certificate (#0) is shown. + + +## A Single HTTP/HTTPS Server + +It is possible to configure a single server that handles both HTTP and HTTPS requests by placing one `listen` directive with the `ssl` parameter and one without in the same virtual server: + +```nginx +server { + listen 80; + listen 443 ssl; + server_name www.example.com; + ssl_certificate www.example.com.crt; + ssl_certificate_key www.example.com.key; + #... +} +``` + +In NGINX version 0.7.13 and earlier, SSL cannot be enabled selectively for individual listening sockets, as shown above. SSL can only be enabled for the entire server using the [ssl](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl) directive, making it impossible to set up a single HTTP/HTTPS server. The `ssl` parameter to the [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen) directive was added to solve this issue. The [ssl](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl) directive therefore is deprecated in version 0.7.14 and later. 
+ + +## Name-Based HTTPS Servers + +A common issue arises when two or more HTTPS servers are configured to listen on a single IP address: + +```nginx +server { + listen 443 ssl; + server_name www.example.com; + ssl_certificate www.example.com.crt; + #... +} + +server { + listen 443 ssl; + server_name www.example.org; + ssl_certificate www.example.org.crt; + #... +} +``` + +With this configuration, a browser receives the default server’s certificate. In this case, it is `www.example.com` regardless of the requested server name. This is caused by the behavior of the SSL protocol itself. The SSL connection is established before the browser sends an HTTP request and NGINX does not know the name of the requested server. Therefore, it may only offer the default server’s certificate. + +The best way to solve this issue is to assign a separate IP address to every HTTPS server: + +```nginx +server { + listen 192.168.1.1:443 ssl; + server_name www.example.com; + ssl_certificate www.example.com.crt; + #... +} + +server { + listen 192.168.1.2:443 ssl; + server_name www.example.org; + ssl_certificate www.example.org.crt; + #... +} +``` + +Note that there are also some specific proxy settings for HTTPS upstreams ([proxy_ssl_ciphers](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_ciphers), [proxy_ssl_protocols](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_protocols), and [proxy_ssl_session_reuse](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_session_reuse)) which can be used for fine‑tuning SSL between NGINX and upstream servers. You can read more about these in the [HTTP proxy module documentation](https://nginx.org/en/docs/http/ngx_http_proxy_module.html). + +### An SSL Certificate With Several Names + +There are other ways to share a single IP address among several HTTPS servers. However, all of them have drawbacks. 
One way is to use a certificate with several names in the `SubjectAltName` certificate field, for example, `www.example.com` and `www.example.org`. However, the length of the `SubjectAltName` field is limited. + +Another way is to use a certificate with a wildcard name, for example, `*.example.org`. A wildcard certificate secures all subdomains of the specified domain, but only on one level. This certificate matches `www.example.org`, but does not match `example.org` or `www.sub.example.org`. These two methods can also be combined. A certificate may contain exact and wildcard names in the `SubjectAltName` field. For example, `example.org` and `*.example.org`. + +It is better to place a certificate file with several names and its private key file at the http level of your configuration so that they inherit the single memory copy across all servers: + +```nginx +ssl_certificate common.crt; +ssl_certificate_key common.key; + +server { + listen 443 ssl; + server_name www.example.com; + #... +} + +server { + listen 443 ssl; + server_name www.example.org; + #... +} +``` + +### Server Name Indication + +A more generic solution for running several HTTPS servers on a single IP address is the [TLS Server Name Indication](https://en.wikipedia.org/wiki/Server_Name_Indication) (SNI) extension ([RFC 6066](https://tools.ietf.org/html/rfc6066)), which allows a browser to pass a requested server name during the SSL handshake. With this solution, the server will know which certificate it should use for the connection. However, SNI has limited browser support. Currently it is supported starting with the following browser versions: + +- Opera 8.0 +- MSIE 7.0 (but only on Windows Vista or higher) +- Firefox 2.0 and other browsers using Mozilla Platform rv:1.8.1 +- Safari 3.2.1 (Windows version supports SNI on Vista or higher) +- Chrome (Windows version supports SNI on Vista or higher, too) + +Only domain names can be passed in SNI. 
However, some browsers will pass the IP address of the server as its name if a request includes a literal IP address. It is best not to rely on this. + +In order to use SNI in NGINX, it must be supported in both the OpenSSL library with which the NGINX binary has been built, as well as the library with which it is being dynamically linked at runtime. OpenSSL supports SNI since the version 0.9.8f if it was built with configuration `option --enable-tlsext`. Since OpenSSL version 0.9.8j, this option is enabled by default. If NGINX was built with SNI support, NGINX shows the following when run with the `-V` switch: + +```shell +nginx -V +... +TLS SNI support enabled +... +``` + +However, if the SNI-enabled NGINX is linked dynamically to an OpenSSL library without SNI support, NGINX displays the warning: + +```none +NGINX was built with SNI support, however, now it is linked +dynamically to an OpenSSL library which has no tlsext support, +therefore SNI is not available +``` + +## Compatibility Notes + +- The SNI support status has been shown by the `-V` switch since versions 0.8.21 and 0.7.62. + +- The `ssl` parameter to the [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen) directive has been supported since version 0.7.14. Prior to version 0.8.21 it could only be specified along with the `default` parameter. + +- SNI has been supported since version 0.5.23. +- The shared SSL session cache has been supported since version 0.5.6. + +- Version 1.9.1 and later: the default SSL protocols are `TLSv1`, `TLSv1.1`, and `TLSv1.2` (if supported by the OpenSSL library). +- From versions 0.7.65 and 0.8.19 and later, the default SSL protocols are `SSLv3`, `TLSv1`, `TLSv1.1`, and `TLSv1.2` (if supported by the OpenSSL library). + +- In versions 0.7.64 and 0.8.18 and earlier, the default SSL protocols are `SSLv2`, `SSLv3`, and `TLSv1`. + +- In version 1.0.5 and later, the default SSL ciphers are `HIGH:!aNULL:!MD5`. 
+ +- In versions 0.7.65 and 0.8.20 and later, the default SSL ciphers are `HIGH:!ADH:!MD5`. + +- From version 0.8.19 the default SSL ciphers are `ALL:!ADH:RC4+RSA:+HIGH:+MEDIUM`. + +- From version 0.7.64, 0.8.18 and earlier the default SSL ciphers are `ALL:!ADH:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP`. diff --git a/content/nginx/admin-guide/security-controls/terminating-ssl-tcp.md b/content/nginx/admin-guide/security-controls/terminating-ssl-tcp.md new file mode 100644 index 000000000..6a8c71985 --- /dev/null +++ b/content/nginx/admin-guide/security-controls/terminating-ssl-tcp.md @@ -0,0 +1,171 @@ +--- +description: Terminate SSL/TLS-encrypted traffic from clients, relieving your upstream + TCP servers of the computational load. +docs: DOCS-438 +doctypes: +- task +title: SSL Termination for TCP Upstream Servers +toc: true +weight: 200 +--- + +This article explains how to set up SSL termination for F5 NGINX Plus and a load-balanced group of servers that accept TCP connections. + + +## What is SSL Termination? + +SSL termination means that NGINX Plus acts as the server-side SSL endpoint for connections with clients: it performs the decryption of requests and encryption of responses that backend servers would otherwise have to do. The operation is called termination because NGINX Plus closes the client connection and forwards the client data over a newly created, unencrypted connection to the servers in an upstream group. In release R6 and later, NGINX Plus performs SSL termination for TCP connections as well as HTTP connections. + +## Prerequisites + +- [NGINX Plus R6]({{< relref "../../releases.md#r6 " >}}) or later +- A load-balanced [upstream group]({{< relref "../load-balancer/tcp-udp-load-balancer.md" >}}) with several TCP servers +- SSL certificates and a private key (obtained or self-generated) + +## Obtaining SSL Certificates + +First, you will need to obtain server certificates and a private key and put them on the server. 
A certificate can be obtained from a trusted certificate authority (CA) or generated using an SSL library such as [OpenSSL](https://www.openssl.org/). + +## Configuring NGINX Plus + +To configure SSL termination, add the following directives to the NGINX Plus configuration: + +### Enabling SSL + +To enable SSL, specify the `ssl` parameter of the [listen](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#listen) directive for the TCP server that passes connections to an upstream server group: + +```nginx +stream { + + server { + listen 12345 ssl; + proxy_pass backend; + #... + } +} +``` + +### Adding SSL Certificates + +To add SSL certificates, specify the path to the certificates (which must be in the PEM format) with the [ssl_certificate](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_certificate) directive, and specify the path to the private key in the [ssl_certificate_key](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_certificate_key) directive: + +```nginx +server { + #... + ssl_certificate /etc/ssl/certs/server.crt; + ssl_certificate_key /etc/ssl/certs/server.key; +} +``` + +Additionally, the [ssl_protocols](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_protocols) and [ssl_ciphers](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_ciphers) directives can be used to limit connections and to include only the strong versions and ciphers of SSL/TLS: + +```nginx +server { + #... + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers HIGH:!aNULL:!MD5; +} +``` + +The [ssl_ciphers](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_ciphers) directive tells NGINX to inform the SSL library which ciphers it prefers. + +## Speeding up Secure TCP Connections + +Implementing SSL/TLS can significantly impact server performance, because the SSL handshake operation (a series of messages the client and server exchange to verify that the connection is trusted) is quite CPU-intensive. 
The default timeout for the SSL handshake is 60 seconds and it can be redefined with the [ssl_handshake_timeout](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_handshake_timeout) directive. We do not recommend setting this value too low or too high, as that might result either in handshake failure or a long time to wait for the handshake to complete: + +```nginx +server { + #... + ssl_handshake_timeout 10s; +} +``` + +### Optimizing the SSL Session Cache + +Creating a cache of the session parameters that apply to each SSL/TLS connection reduces the number of handshakes and thus can significantly improve performance. Caching is set with the [ssl_session_cache](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_session_cache) directive: + +```nginx +ssl_session_cache; +``` + +By default, NGINX Plus uses the `built-in` type of the session cache, which means the cache built in your SSL library. This is not optimal, because such a cache can be used by only one worker process and can cause memory fragmentation. Set the `ssl_session_cache` directive to `shared` to share the cache among all worker processes, which speeds up later connections because the connection setup information is already known: + +```nginx +ssl_session_cache shared:SSL:1m; +``` + +As a reference, a 1-MB shared cache can hold approximately 4,000 sessions. + +By default, NGINX Plus retains cached session parameters for five minutes. Increasing the value of the [ssl_session_timeout](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_session_timeout) to several hours can improve performance because reusing cached session parameters reduces the number of time-consuming handshakes. When you increase the timeout, the cache needs to be bigger to accommodate the larger number of cached parameters that results. 
For the 4-hour timeout in the following example, a 20-MB cache is appropriate:
+
+```nginx
+ssl_session_timeout 4h;
+```
+
+If the timeout length is increased, you need a larger cache to store sessions, for example, 20 MB:
+
+```nginx
+server {
+    #...
+    ssl_session_cache shared:SSL:20m;
+    ssl_session_timeout 4h;
+}
+```
+
+These lines create an in-memory cache of 20 MB to store session information, and instruct NGINX Plus to reuse session parameters from the cache for 4 hours after the moment they were added.
+
+### Session Tickets
+
+Session tickets are an alternative to the session cache. Session information is stored on the client side, eliminating the need for a server-side cache to store session information. When a client resumes interaction with the backend server, it presents the session ticket and re-negotiation is not necessary. Set the [ssl_session_tickets](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_session_tickets) directive to `on`:
+
+```nginx
+server {
+    #...
+    ssl_session_tickets on;
+}
+```
+
+When using session tickets for an upstream group, each upstream server must be initialized with the same session key. It’s a best practice to change session keys frequently, so we recommend that you implement a mechanism to rotate the shared key across all upstream servers:
+
+```nginx
+server {
+    #...
+
+    ssl_session_tickets on;
+    ssl_session_ticket_key /etc/ssl/session_ticket_keys/current.key;
+    ssl_session_ticket_key /etc/ssl/session_ticket_keys/previous.key;
+}
+```
+
+## Complete Example
+
+```nginx
+stream {
+    upstream stream_backend {
+        server backend1.example.com:12345;
+        server backend2.example.com:12345;
+        server backend3.example.com:12345;
+    }
+
+    server {
+        listen 12345 ssl;
+        proxy_pass stream_backend;
+
+        ssl_certificate /etc/ssl/certs/server.crt;
+        ssl_certificate_key /etc/ssl/certs/server.key;
+        ssl_protocols SSLv3 TLSv1 TLSv1.1 TLSv1.2;
+        ssl_ciphers HIGH:!aNULL:!MD5;
+        ssl_session_cache shared:SSL:20m;
+        ssl_session_timeout 4h;
+        ssl_handshake_timeout 30s;
+        #...
+    }
+}
+```
+
+In this example, the directives in the `server` block instruct NGINX Plus to terminate and decrypt secured TCP traffic from clients and pass it unencrypted to the upstream group `stream_backend` which consists of three servers.
+
+The `ssl` parameter of the [listen](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#listen) directive instructs NGINX Plus to accept SSL connections. When a client requests a secure TCP connection, NGINX Plus starts the handshake process, which uses the PEM-format certificate specified by the [ssl_certificate](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_certificate) directive, the certificate’s private key specified by the [ssl_certificate_key](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_certificate_key) directive, and the protocols and ciphers listed by the [ssl_protocols](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_protocols) and [ssl_ciphers](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_ciphers) directives.
+
+As soon as the secure TCP connection is established, NGINX Plus caches the session parameters according to the [ssl_session_cache](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_session_cache) directive. 
In the example, the session cache is shared between all worker processes (the `shared` parameter), is 20 MB in size (the `20m` parameter), and retains each SSL session for reuse for 4 hours (the [ssl_session_timeout](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_session_timeout) directive). + + diff --git a/content/nginx/admin-guide/web-server/_index.md b/content/nginx/admin-guide/web-server/_index.md new file mode 100644 index 000000000..6c7c17bcb --- /dev/null +++ b/content/nginx/admin-guide/web-server/_index.md @@ -0,0 +1,9 @@ +--- +description: Documentation explaining how to configure NGINX and F5 NGINX Plus as a web + server, reverse proxy, or application gateway. +menu: + docs: + parent: NGINX Plus +title: Web Server +weight: 500 +--- diff --git a/content/nginx/admin-guide/web-server/app-gateway-uwsgi-django.md b/content/nginx/admin-guide/web-server/app-gateway-uwsgi-django.md new file mode 100644 index 000000000..593e3061e --- /dev/null +++ b/content/nginx/admin-guide/web-server/app-gateway-uwsgi-django.md @@ -0,0 +1,108 @@ +--- +description: Configure NGINX and F5 NGINX Plus as an application gateway for uWSGI and + Django. +docs: DOCS-439 +doctypes: +- task +title: Using NGINX and NGINX Plus as an Application Gateway with uWSGI and Django +toc: true +weight: 500 +--- + +This article explains how to use NGINX or F5 NGINX Plus as an application gateway with uWSGI and Django. + + +## Introduction + +NGINX is a high‑performance, scalable, secure, and reliable web server and a reverse proxy. NGINX enables all the main web acceleration techniques for managing HTTP connections and traffic. 
For many years, NGINX capabilities such as [load balancing]({{< relref "../load-balancer/http-load-balancer.md" >}}), [SSL termination]({{< relref "../security-controls/terminating-ssl-http.md" >}}), connection and request [policing]({{< relref "../security-controls/controlling-access-proxied-http.md" >}}), static [content offload]({{< relref "../web-server/serving-static-content.md" >}}), and [content caching]({{< relref "../content-cache/content-caching.md" >}}) have helped NGINX users to build reliable and fast websites quickly and efficiently. + +NGINX can also act as a secure application gateway, offering a number of specialized built‑in interfaces to pass traffic from users to applications. In this regard, not only can NGINX proxy HTTP and HTTPS traffic to an HTTP‑enabled application container, it can also directly talk to most of the popular lightweight application servers and web frameworks via optimized app‑gateway interfaces implemented in modules like [FastCGI](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html), [Memcached](https://nginx.org/en/docs/http/ngx_http_memcached_module.html), [scgi](https://nginx.org/en/docs/http/ngx_http_scgi_module.html), and [uwsgi](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html). + +Most commonly used application containers have embedded external HTTP interfaces with some routing capabilities, but one important reason to use NGINX as an application gateway is that it provides an all‑in‑one solution for HTTP connection management, load balancing, content caching, and traffic security. The application backend sits securely behind NGINX for better scalability and performance. It is also very easy to cluster application instances behind NGINX to build highly available applications. + + +## About uWSGI and Django + +A few words about "specialized interfaces". As useful as it is, HTTP has never been optimized for modern, lightweight application‑deployment scenarios. 
In recent years, a number of standardized interfaces have evolved for use with various application frameworks and application containers. One of these interfaces is the Web Server Gateway Interface ([WSGI](http://wsgi.readthedocs.org/en/latest/)), an interface between a web server/proxy and Python‑based applications.
+
+One of the most commonly used application servers offering the [uwsgi](http://uwsgi-docs.readthedocs.org/en/latest/Protocol.html) protocol – its own implementation of the WSGI protocol – is the [uWSGI application server container](https://github.com/unbit/uwsgi).
+
+Other than that, the uWSGI application server supports HTTP, FastCGI, and SCGI – with the uwsgi protocol being recommended as the fastest way to talk to applications.
+
+
+## Configuring NGINX and NGINX Plus for Use with uWSGI and Django
+
+This document provides an example of how to configure NGINX and NGINX Plus for use with a [uWSGI](http://uwsgi-docs.readthedocs.org/en/latest/) server and a Python development environment.
+
+NGINX 0.8.40 and later (and all releases of NGINX Plus) includes native support for passing traffic from users to Python applications via the uwsgi protocol. If you download [NGINX Open Source binaries or source](https://nginx.org/en/download.html) from our official repositories, or [NGINX Plus from the customer portal](https://account.f5.com/myf5), you don’t have to do anything to enable support for the uwsgi protocol – NGINX and NGINX Plus support uwsgi by default.
+
+Configuring the uWSGI application container itself is outside the scope of this document; refer to the excellent [Quickstart for Python/WSGI applications](http://uwsgi-docs.readthedocs.org/en/latest/WSGIquickstart.html) for more information.
+
+[Django](https://www.djangoproject.com/) is probably the most commonly used Python web framework, so for simplicity's sake the example uses a Django‑based setup for the Python app. 
The [Django documentation](https://docs.djangoproject.com/en/1.11/) provides extensive information on how to configure a Django environment. + +For illustrative purposes only, this is one way you might invoke your uWSGI server with Django: + +```none + --chdir=/var/django/projects/myapp \ + --module=myapp.wsgi:application \ + --env DJANGO_SETTINGS_MODULE=myapp.settings \ + --master --pidfile=/usr/local/var/run/uwsgi/project-master.pid \ + --socket=127.0.0.1:29000 \ + --processes=5 \ + --uid=505 --gid=505 \ + --harakiri=20 \ + --max-requests=5000 \ + --vacuum \ + --daemonize=/usr/local/var/log/uwsgi/myapp.log +``` + +With these options in place, here's a sample NGINX configuration for use with a Django project: + +```nginx +http { + # ... + upstream django { + server 127.0.0.1:29000; + } + + server { + listen 80; + server_name myapp.example.com; + root /var/www/myapp/html; + + location / { + index index.html; + } + + location /static/ { + alias /var/django/projects/myapp/static/; + } + + location /main { + include /etc/nginx/uwsgi_params; + uwsgi_pass django; + uwsgi_param Host $host; + uwsgi_param X-Real-IP $remote_addr; + uwsgi_param X-Forwarded-For $proxy_add_x_forwarded_for; + uwsgi_param X-Forwarded-Proto $http_x_forwarded_proto; + } + } +} +``` + +Notice that the configuration defines an upstream called **django**. The port number on the server in the group, 29000, matches the one the uWSGI server binds to, as specified by the `socket` argument in the sample `uwsgi` command. + +Serving of static content is offloaded to NGINX or NGINX Plus, which serves it directly from **/var/django/projects/myapp/static**. Traffic to the application at **/main** is proxied and bridged from HTTP to the uwsgi protocol and passed to the Django app running inside a uWSGI application container. + + +## Conclusion + +Lightweight, heterogeneous application environments are becoming an increasingly popular way of building and deploying modern web applications. 
Newer, standardized application interface protocols like uwsgi and FastCGI enable faster communication between users and applications. + +Using NGINX and NGINX Plus in front of an application container has become a common way to free applications from the burden of HTTP traffic management, and to protect the application from unexpected spikes of user traffic, malicious behavior, denial‑of‑service (DoS) attacks, and more. Unbundling real‑world, external HTTP traffic from the actual application allows the developer to fully focus on the application logic, and leave the web acceleration and fundamental HTTP traffic security tasks to NGINX or NGINX Plus. + + +## Resources + +- [NGINX support](https://uwsgi-docs.readthedocs.io/en/latest/Nginx.html) in the uWSGI project documentation +- [How to use Django with uWSGI](https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/uwsgi/) in the Django project documentation diff --git a/content/nginx/admin-guide/web-server/compression.md b/content/nginx/admin-guide/web-server/compression.md new file mode 100644 index 000000000..e50966ec5 --- /dev/null +++ b/content/nginx/admin-guide/web-server/compression.md @@ -0,0 +1,98 @@ +--- +description: Compress server responses, or decompress them for clients that don't + support compression, to improve delivery speed and reduce overhead on the server. +docs: DOCS-440 +doctypes: +- task +title: Compression and Decompression +toc: true +weight: 400 +--- + +This section describes how to configure compression or decompression of responses, as well as sending compressed files. + +## Introduction + +Compressing responses often significantly reduces the size of transmitted data. However, since compression happens at runtime it can also add considerable processing overhead which can negatively affect performance. NGINX performs compression before sending responses to clients, but does not “double compress” responses that are already compressed (for example, by a proxied server). 
+ +## Enabling Compression + +To enable compression, include the [gzip](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip) directive with the `on` parameter. + +```nginx +gzip on; +``` + +By default, NGINX compresses responses only with MIME type `text/html`. To compress responses with other MIME types, include the [gzip_types](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_types) directive and list the additional types. + +```nginx +gzip_types text/plain application/xml; +``` + +To specify the minimum length of the response to compress, use the [gzip_min_length](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_min_length) directive. The default is 20 bytes (here adjusted to 1000): + +```nginx +gzip_min_length 1000; +``` + +By default, NGINX does not compress responses to proxied requests (requests that come from the proxy server). The fact that a request comes from a proxy server is determined by the presence of the `Via` header field in the request. To configure compression of these responses, use the [gzip_proxied](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_proxied) directive. The directive has a number of parameters specifying which kinds of proxied requests NGINX should compress. For example, it is reasonable to compress responses only to requests that will not be cached on the proxy server. For this purpose the `gzip_proxied` directive has parameters that instruct NGINX to check the `Cache-Control` header field in a response and compress the response if the value is `no-cache`, `no-store`, or `private`. In addition, you must include the `expired` parameter to check the value of the `Expires` header field. 
These parameters are set in the following example, along with the `auth` parameter, which checks for the presence of the `Authorization` header field (an authorized response is specific to the end user and is not typically cached): + +```nginx +gzip_proxied no-cache no-store private expired auth; +``` + +As with most other directives, the directives that configure compression can be included in the `http` context or in a `server` or `location` configuration block. + +The overall configuration of gzip compression might look like this. + +```nginx +server { + gzip on; + gzip_types text/plain application/xml; + gzip_proxied no-cache no-store private expired auth; + gzip_min_length 1000; + ... +} +``` + +## Enabling Decompression + +Some clients do not support responses with the `gzip` encoding method. At the same time, it might be desirable to store compressed data, or compress responses on the fly and store them in the cache. To successfully serve both clients that do and do not accept compressed data, NGINX can decompress data on the fly when sending it to the latter type of client. + +To enable runtime decompression, use the [gunzip](https://nginx.org/en/docs/http/ngx_http_gunzip_module.html#gunzip) directive. + +```nginx +location /storage/ { + gunzip on; + ... +} +``` + +The `gunzip` directive can be specified in the same context as the `gzip` directive: + +```nginx +server { + gzip on; + gzip_min_length 1000; + gunzip on; + ... +} +``` + +Note that this directive is defined in a separate [module](https://nginx.org/en/docs/http/ngx_http_gunzip_module.html) that might not be included in an NGINX Open Source build by default. + +## Sending Compressed Files + +To send a compressed version of a file to the client instead of the regular one, set the [gzip_static](https://nginx.org/en/docs/http/ngx_http_gzip_static_module.html#gzip_static) directive to `on` within the appropriate context. 
+ +```nginx +location / { + gzip_static on; +} +``` + +In this case, to service a request for **/path/to/file**, NGINX tries to find and send the file **/path/to/file.gz**. If the file doesn’t exist, or the client does not support gzip, NGINX sends the uncompressed version of the file. + +Note that the `gzip_static` directive does not enable on-the-fly compression. It merely uses a file compressed beforehand by any compression tool. To compress content (and not only static content) at runtime, use the `gzip` directive. + +This directive is defined in a separate [module](https://nginx.org/en/docs/http/ngx_http_gzip_static_module.html) that might not be included in an NGINX Open Source build by default. diff --git a/content/nginx/admin-guide/web-server/reverse-proxy.md b/content/nginx/admin-guide/web-server/reverse-proxy.md new file mode 100644 index 000000000..12f83668d --- /dev/null +++ b/content/nginx/admin-guide/web-server/reverse-proxy.md @@ -0,0 +1,137 @@ +--- +description: Configure NGINX as a reverse proxy for HTTP and other protocols, with + support for modifying request headers and fine-tuned buffering of responses. +docs: DOCS-441 +doctypes: +- task +title: NGINX Reverse Proxy +toc: true +weight: 300 +--- + +This article describes the basic configuration of a proxy server. You will learn how to pass a request from NGINX to proxied servers over different protocols, modify client request headers that are sent to the proxied server, and configure buffering of responses coming from the proxied servers. + +## Introduction + +Proxying is typically used to distribute the load among several servers, seamlessly show content from different websites, or pass requests for processing to application servers over protocols other than HTTP. + +## Passing a Request to a Proxied Server + +When NGINX proxies a request, it sends the request to a specified proxied server, fetches the response, and sends it back to the client. 
It is possible to proxy requests to an HTTP server (another NGINX server or any other server) or a non-HTTP server (which can run an application developed with a specific framework, such as PHP or Python) using a specified protocol. Supported protocols include [FastCGI](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html), [uwsgi](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html), [SCGI](https://nginx.org/en/docs/http/ngx_http_scgi_module.html), and [memcached](https://nginx.org/en/docs/http/ngx_http_memcached_module.html). + +To pass a request to an HTTP proxied server, the [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) directive is specified inside a [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location). For example: + +```nginx +location /some/path/ { + proxy_pass http://www.example.com/link/; +} +``` + +This example configuration results in passing all requests processed in this location to the proxied server at the specified address. This address can be specified as a domain name or an IP address. The address may also include a port: + +```nginx +location ~ \.php { + proxy_pass http://127.0.0.1:8000; +} +``` + +Note that in the first example above, the address of the proxied server is followed by a URI, `/link/`. If the URI is specified along with the address, it replaces the part of the request URI that matches the location parameter. For example, here the request with the `/some/path/page.html` URI will be proxied to `http://www.example.com/link/page.html`. If the address is specified without a URI, or it is not possible to determine the part of URI to be replaced, the full request URI is passed (possibly, modified). 
+ +To pass a request to a non-HTTP proxied server, the appropriate `**_pass` directive should be used: + +- [fastcgi_pass](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_pass) passes a request to a FastCGI server +- [uwsgi_pass](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_pass) passes a request to a uwsgi server +- [scgi_pass](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_pass) passes a request to an SCGI server +- [memcached_pass](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_pass) passes a request to a memcached server + +Note that in these cases, the rules for specifying addresses may be different. You may also need to pass additional parameters to the server (see the [reference documentation](https://nginx.org/en/docs/) for more detail). + +The [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) directive can also point to a [named group](https://nginx.org/en/docs/http/load_balancing.html#algorithms) of servers. In this case, requests are distributed among the servers in the group according to the [specified method](https://www.nginx.com/resources/admin-guide/load-balancer/). + + +## Passing Request Headers + +By default, NGINX redefines two header fields in proxied requests, “Host” and “Connection”, and eliminates the header fields whose values are empty strings. “Host” is set to the `$proxy_host` variable, and “Connection” is set to `close`. + +To change these setting, as well as modify other header fields, use the [proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) directive. This directive can be specified in a [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) or higher. 
It can also be specified in a particular [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server) context or in the [http](https://nginx.org/en/docs/http/ngx_http_core_module.html#http) block. For example: + +```nginx +location /some/path/ { + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_pass http://localhost:8000; +} +``` + +In this configuration the “Host” field is set to the [$host](https://nginx.org/en/docs/http/ngx_http_core_module.html#variables) variable. + +To prevent a header field from being passed to the proxied server, set it to an empty string as follows: + +```nginx +location /some/path/ { + proxy_set_header Accept-Encoding ""; + proxy_pass http://localhost:8000; +} +``` + + +## Configuring Buffers + +By default NGINX buffers responses from proxied servers. A response is stored in the internal buffers and is not sent to the client until the whole response is received. Buffering helps to optimize performance with slow clients, which can waste proxied server time if the response is passed from NGINX to the client synchronously. However, when buffering is enabled NGINX allows the proxied server to process responses quickly, while NGINX stores the responses for as much time as the clients need to download them. + +The directive that is responsible for enabling and disabling buffering is [proxy_buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering). By default it is set to `on` and buffering is enabled. + +The [proxy_buffers](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffers) directive controls the size and the number of buffers allocated for a request. The first part of the response from a proxied server is stored in a separate buffer, the size of which is set with the [proxy_buffer_size](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size) directive. 
This part usually contains a comparatively small response header and can be made smaller than the buffers for the rest of the response. + +In the following example, the default number of buffers is increased and the size of the buffer for the first portion of the response is made smaller than the default. + +```nginx +location /some/path/ { + proxy_buffers 16 4k; + proxy_buffer_size 2k; + proxy_pass http://localhost:8000; +} +``` + +If buffering is disabled, the response is sent to the client synchronously while it is receiving it from the proxied server. This behavior may be desirable for fast interactive clients that need to start receiving the response as soon as possible. + +To disable buffering in a specific location, place the [proxy_buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering) directive in the [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) with the `off` parameter, as follows: + +```nginx +location /some/path/ { + proxy_buffering off; + proxy_pass http://localhost:8000; +} +``` + +In this case NGINX uses only the buffer configured by [proxy_buffer_size](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size) to store the current part of a response. + +A common use of a reverse proxy is to provide load balancing. Learn how to improve power, performance, and focus on your apps with rapid deployment in the free [Five Reasons to Choose a Software Load Balancer](https://www.nginx.com/resources/library/five-reasons-choose-software-load-balancer/) ebook. + + +## Choosing an Outgoing IP Address + +If your proxy server has several network interfaces, sometimes you might need to choose a particular source IP address for connecting to a proxied server or an upstream. This may be useful if a proxied server behind NGINX is configured to accept connections from particular IP networks or IP address ranges. 
+ +Specify the [proxy_bind](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_bind) directive and the IP address of the necessary network interface: + +```nginx +location /app1/ { + proxy_bind 127.0.0.1; + proxy_pass http://example.com/app1/; +} + +location /app2/ { + proxy_bind 127.0.0.2; + proxy_pass http://example.com/app2/; +} +``` + +The IP address can be also specified with a variable. For example, the [`$server_addr`](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_server_addr) variable passes the IP address of the network interface that accepted the request: + +```nginx +location /app3/ { + proxy_bind $server_addr; + proxy_pass http://example.com/app3/; +} +``` diff --git a/content/nginx/admin-guide/web-server/serving-static-content.md b/content/nginx/admin-guide/web-server/serving-static-content.md new file mode 100644 index 000000000..77e151213 --- /dev/null +++ b/content/nginx/admin-guide/web-server/serving-static-content.md @@ -0,0 +1,217 @@ +--- +description: Configure NGINX and F5 NGINX Plus to serve static content, with type-specific + root directories, checks for file existence, and performance optimizations. +docs: DOCS-442 +doctypes: +- task +title: Serving Static Content +toc: true +weight: 200 +--- + +This section describes how to configure NGINX and F5 NGINX Plus to serve static content, how to define which paths are searched to find requested files, how to set up index files, and how to tune NGINX and NGINX Plus, as well as the kernel, for optimal performance. + + +## Root Directory and Index Files + +The [root](https://nginx.org/en/docs/http/ngx_http_core_module.html#root) directive specifies the root directory that will be used to search for a file. To obtain the path of a requested file, NGINX appends the request URI to the path specified by the `root` directive. The directive can be placed on any level within the `http {}`, `server {}`, or `location {}` contexts. 
In the example below, the `root` directive is defined for a virtual server. It applies to all `location {}` blocks where the `root` directive is not included to explicitly redefine the root: + +```nginx +server { + root /www/data; + + location / { + } + + location /images/ { + } + + location ~ \.(mp3|mp4) { + root /www/media; + } +} +``` + +Here, NGINX searches for a URI that starts with `/images/` in the `/www/data/images/` directory in the file system. But if the URI ends with the `.mp3` or `.mp4` extension, NGINX instead searches for the file in the `/www/media/` directory because it is defined in the matching `location` block. + +If a request ends with a slash, NGINX treats it as a request for a directory and tries to find an index file in the directory. The [index](https://nginx.org/en/docs/http/ngx_http_index_module.html#index) directive defines the index file’s name (the default value is `index.html`). To continue with the example, if the request URI is `/images/some/path/`, NGINX delivers the file `/www/data/images/some/path/index.html` if it exists. If it does not, NGINX returns HTTP code `404 (Not Found)` by default. To configure NGINX to return an automatically generated directory listing instead, include the `on` parameter to the [autoindex](https://nginx.org/en/docs/http/ngx_http_autoindex_module.html#autoindex) directive: + +```nginx +location /images/ { + autoindex on; +} +``` + +You can list more than one filename in the `index` directive. NGINX searches for files in the specified order and returns the first one it finds. + +```nginx +location / { + index index.$geo.html index.htm index.html; +} +``` + +The `$geo` variable used here is a custom variable set through the [geo](https://nginx.org/en/docs/http/ngx_http_geo_module.html#geo) directive. The value of the variable depends on the client’s IP address. 
+ +To return the index file, NGINX checks for its existence and then makes an internal redirect to the URI obtained by appending the name of the index file to the base URI. The internal redirect results in a new search of a location and can end up in another location as in the following example: + +```nginx +location / { + root /data; + index index.html index.php; +} + +location ~ \.php { + fastcgi_pass localhost:8000; + #... +} +``` + +Here, if the URI in a request is `/path/`, and `/data/path/index.html` does not exist but `/data/path/index.php` does, the internal redirect to `/path/index.php` is mapped to the second location. As a result, the request is proxied. + + +## Trying Several Options + +The [try_files](https://nginx.org/en/docs/http/ngx_http_core_module.html#try_files) directive can be used to check whether the specified file or directory exists; NGINX makes an internal redirect if it does, or returns a specified status code if it doesn’t. For example, to check the existence of a file corresponding to the request URI, use the `try_files` directive and the `$uri` variable as follows: + +```nginx +server { + root /www/data; + + location /images/ { + try_files $uri /images/default.gif; + } +} +``` + +The file is specified in the form of the URI, which is processed using the `root` or `alias` directives set in the context of the current location or virtual server. In this case, if the file corresponding to the original URI doesn’t exist, NGINX makes an internal redirect to the URI specified by the last parameter, returning `/www/data/images/default.gif`. + +The last parameter can also be a status code (directly preceded by the equals sign) or the name of a location. In the following example, a `404` error is returned if none of the parameters to the `try_files` directive resolve to an existing file or directory. 
+ +```nginx +location / { + try_files $uri $uri/ $uri.html =404; +} +``` + +In the next example, if neither the original URI nor the URI with the appended trailing slash resolve into an existing file or directory, the request is redirected to the named location which passes it to a proxied server. + +```nginx +location / { + try_files $uri $uri/ @backend; +} + +location @backend { + proxy_pass http://backend.example.com; +} +``` + +For more information, watch the [Content Caching](https://www.nginx.com/resources/webinars/content-caching-nginx-plus/) webinar on‑demand to learn how to dramatically improve the performance of a website, and get a deep‑dive into NGINX’s caching capabilities. + + +## Optimizing Performance for Serving Content + +Loading speed is a crucial factor of serving any content. Making minor optimizations to your NGINX configuration may boost the productivity and help reach optimal performance. + +### Enabling `sendfile` + +By default, NGINX handles file transmission itself and copies the file into the buffer before sending it. Enabling the [sendfile](https://nginx.org/en/docs/http/ngx_http_core_module.html#sendfile) directive eliminates the step of copying the data into the buffer and enables direct copying data from one file descriptor to another. Alternatively, to prevent one fast connection from entirely occupying the worker process, you can use the [sendfile_max_chunk](https://nginx.org/en/docs/http/ngx_http_core_module.html#sendfile_max_chunk) directive to limit the amount of data transferred in a single `sendfile()` call (in this example, to `1` MB): + +```nginx +location /mp3 { + sendfile on; + sendfile_max_chunk 1m; + #... +} +``` + +### Enabling `tcp_nopush` + +Use the [tcp_nopush](https://nginx.org/en/docs/http/ngx_http_core_module.html#tcp_nopush) directive together with the [sendfile](https://nginx.org/en/docs/http/ngx_http_core_module.html#sendfile) `on;`directive. 
This enables NGINX to send HTTP response headers in one packet right after the chunk of data has been obtained by `sendfile()`. + +```nginx +location /mp3 { + sendfile on; + tcp_nopush on; + #... +} +``` + +### Enabling `tcp_nodelay` + +The [tcp_nodelay](https://nginx.org/en/docs/http/ngx_http_core_module.html#tcp_nodelay) directive allows override of [Nagle’s algorithm](https://en.wikipedia.org/wiki/Nagle's_algorithm), originally designed to solve problems with small packets in slow networks. The algorithm consolidates a number of small packets into a larger one and sends the packet with a `200` ms delay. Nowadays, when serving large static files, the data can be sent immediately regardless of the packet size. The delay also affects online applications (ssh, online games, online trading, and so on). By default, the [tcp_nodelay](https://nginx.org/en/docs/http/ngx_http_core_module.html#tcp_nodelay) directive is set to `on` which means that the Nagle’s algorithm is disabled. Use this directive only for keepalive connections: + + +```nginx +location /mp3 { + tcp_nodelay on; + keepalive_timeout 65; + #... +} +``` + + +### Optimizing the Backlog Queue + +One of the important factors is how fast NGINX can handle incoming connections. The general rule is when a connection is established, it is put into the “listen” queue of a listen socket. Under normal load, either the queue is small or there is no queue at all. But under high load, the queue can grow dramatically, resulting in uneven performance, dropped connections, and increased latency. + +#### Displaying the Listen Queue + +To display the current listen queue, run this command: + +```none +netstat -Lan +``` + +The output might be like the following, which shows that in the listen queue on port `80` there are `10` unaccepted connections against the configured maximum of `128` queued connections. This situation is normal. 
+ +```none +Current listen queue sizes (qlen/incqlen/maxqlen) +Listen Local Address +0/0/128 *.12345 +10/0/128 *.80 +0/0/128 *.8080 +``` + +In contrast, in the following command the number of unaccepted connections (`192`) exceeds the limit of `128`. This is quite common when a web site experiences heavy traffic. To achieve optimal performance, you need to increase the maximum number of connections that can be queued for acceptance by NGINX in both your operating system and the NGINX configuration. + +```none +Current listen queue sizes (qlen/incqlen/maxqlen) +Listen Local Address +0/0/128 *.12345 +192/0/128 *.80 +0/0/128 *.8080 +``` + +#### Tuning the Operating System + +Increase the value of the `net.core.somaxconn` kernel parameter from its default value (`128`) to a value high enough for a large burst of traffic. In this example, it's increased to `4096`. + +- For FreeBSD, run the command: + + ```none + sudo sysctl kern.ipc.somaxconn=4096 + ``` + +- For Linux: + 1. Run the command: + + ```none + sudo sysctl -w net.core.somaxconn=4096 + ``` + + 2. Use a text editor to add the following line to `/etc/sysctl.conf`: + + ```none + net.core.somaxconn = 4096 + ``` + +#### Tuning NGINX + +If you set the `somaxconn` kernel parameter to a value greater than `512`, change the `backlog` parameter to the NGINX [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen) directive to match: + +```nginx +server { + listen 80 backlog=4096; + # ... +} +``` diff --git a/content/nginx/admin-guide/web-server/web-server.md b/content/nginx/admin-guide/web-server/web-server.md new file mode 100644 index 000000000..0b7a473b7 --- /dev/null +++ b/content/nginx/admin-guide/web-server/web-server.md @@ -0,0 +1,291 @@ +--- +description: Configure NGINX and F5 NGINX Plus as a web server, with support for virtual + server multi-tenancy, URI and response rewriting, variables, and error handling. 
+docs: DOCS-443 +doctypes: +- task +title: Configuring NGINX and NGINX Plus as a Web Server +toc: true +weight: 100 +--- + +This article explains how to configure NGINX Open Source and F5 NGINX Plus as a web server. + +**Note:** The information in this article applies to both NGINX Open Source and NGINX Plus. For ease of reading, the remainder of the article refers to NGINX Plus only. + +At a high level, configuring NGINX Plus as a web server is a matter of defining which URLs it handles and how it processes HTTP requests for resources at those URLs. At a lower level, the configuration defines a set of _virtual servers_ that control the processing of requests for particular domains or IP addresses. For more information about configuration files, refer to [Creating NGINX and NGINX Plus Configuration Files]({{< ref "/nginx/admin-guide/basic-functionality/managing-configuration-files">}}). + +Each virtual server for HTTP traffic defines special configuration instances called _locations_ that control processing of specific sets of URIs. Each location defines its own scenario of what happens to requests that are mapped to this location. NGINX Plus provides full control over this process. Each location can proxy the request or return a file. In addition, the URI can be modified, so that the request is redirected to another location or virtual server. Also, a specific error code can be returned and you can configure a specific page to correspond to each error code. + + + +## Setting Up Virtual Servers + +The NGINX Plus configuration file must include at least one [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server) directive to define a virtual server. When NGINX Plus processes a request, it first selects the virtual server that will serve the request. 
+ +A virtual server is defined by a `server` directive in the `http` context, for example: + +```nginx +http { + server { + # Server configuration + } +} +``` + +It is possible to add multiple `server` directives into the `http` context to define multiple virtual servers. + +The `server` configuration block usually includes a [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen) directive to specify the IP address and port (or Unix domain socket and path) on which the server listens for requests. Both IPv4 and IPv6 addresses are accepted; enclose IPv6 addresses in square brackets. + +The example below shows configuration of a server that listens on IP address 127.0.0.1 and port 8080: + +```nginx +server { + listen 127.0.0.1:8080; + # Additional server configuration +} +``` + +If a port is omitted, the standard port is used. Likewise, if an address is omitted, the server listens on all addresses. If the `listen` directive is not included at all, the “standard” port is `80/tcp` and the “default” port is `8000/tcp`, depending on superuser privileges. + +If there are several servers that match the IP address and port of the request, NGINX Plus tests the request’s `Host` header field against the [server_name](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) directives in the `server` blocks. The parameter to `server_name` can be a full (exact) name, a wildcard, or a regular expression. A wildcard is a character string that includes the asterisk (`*`) at its beginning, end, or both; the asterisk matches any sequence of characters. NGINX Plus uses the Perl syntax for regular expressions; precede them with the tilde (`~`). This example illustrates an exact name. + +```nginx +server { + listen 80; + server_name example.org www.example.org; + #... +} +``` + +If several names match the `Host` header, NGINX Plus selects one by searching for names in the following order and using the first match it finds: + +1. Exact name +2. 
Longest wildcard starting with an asterisk, such as `*.example.org` +3. Longest wildcard ending with an asterisk, such as `mail.*` +4. First matching regular expression (in order of appearance in the configuration file) + +If the `Host` header field does not match a server name, NGINX Plus routes the request to the default server for the port on which the request arrived. The default server is the first one listed in the **nginx.conf** file, unless you include the `default_server` parameter to the `listen` directive to explicitly designate a server as the default. + +```nginx +server { + listen 80 default_server; + #... +} +``` + + + +## Configuring Locations + +NGINX Plus can send traffic to different proxies or serve different files based on the request URIs. These blocks are defined using the [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) directive placed within a `server` directive. + +For example, you can define three `location` blocks to instruct the virtual server to send some requests to one proxied server, send other requests to a different proxied server, and serve the rest of the requests by delivering files from the local file system. + +NGINX Plus tests request URIs against the parameters of all `location` directives and applies the directives defined in the matching location. Inside each `location` block, it is usually possible (with a few exceptions) to place even more `location` directives to further refine the processing for specific groups of requests. + +**Note:** In this guide, the word _location_ refers to a single [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) context. + +There are two types of parameter to the `location` directive: _prefix strings_ (pathnames) and regular expressions. For a request URI to match a prefix string, it must start with the prefix string. 
+ +The following sample location with a pathname parameter matches request URIs that begin with **/some/path/**, such as **/some/path/document.html**. (It does not match **/my-site/some/path** because **/some/path** does not occur at the start of that URI.) + +```nginx +location /some/path/ { + #... +} +``` + +A regular expression is preceded with the tilde (`~`) for case-sensitive matching, or the tilde-asterisk (`~*`) for case-insensitive matching. The following example matches URIs that include the string **.html** or **.htm** in any position. + +```nginx +location ~ \.html? { + #... +} +``` + + + +### NGINX Location Priority + +To find the location that best matches a URI, NGINX Plus first compares the URI to the locations with a prefix string. It then searches the locations with a regular expression. + +Higher priority is given to regular expressions, unless the `^~` modifier is used. Among the prefix strings NGINX Plus selects the most specific one (that is, the longest and most complete string). The exact logic for selecting a location to process a request is given below: + +1. Test the URI against all prefix strings. +2. The `=` (equals sign) modifier defines an exact match of the URI and a prefix string. If the exact match is found, the search stops. +3. If the `^~` (caret-tilde) modifier prepends the longest matching prefix string, the regular expressions are not checked. +4. Store the longest matching prefix string. +5. Test the URI against regular expressions. +6. Stop processing when the first matching regular expression is found and use the corresponding location. +7. If no regular expression matches, use the location corresponding to the stored prefix string. + +A typical use case for the `=` modifier is requests for **/** (forward slash). If requests for **/** are frequent, specifying `= /` as the parameter to the `location` directive speeds up processing, because the search for matches stops after the first comparison. 
+ +```nginx +location = / { + #... +} +``` + +A `location` context can contain directives that define how to resolve a request – either serve a static file or pass the request to a proxied server. In the following example, requests that match the first `location` context are served files from the **/data** directory and the requests that match the second are passed to the proxied server that hosts content for the **** domain. + +```nginx +server { + location /images/ { + root /data; + } + + location / { + proxy_pass http://www.example.com; + } +} +``` + +The [root](https://nginx.org/en/docs/http/ngx_http_core_module.html#root) directive specifies the file system path in which to search for the static files to serve. The request URI associated with the location is appended to the path to obtain the full name of the static file to serve. In the example above, in response to a request for **/images/example.png**, NGINX Plus delivers the file **/data/images/example.png**. + +The [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) directive passes the request to the proxied server accessed with the configured URL. The response from the proxied server is then passed back to the client. In the example above, all requests with URIs that do not start with **/images/** are be passed to the proxied server. + + + +## Using Variables + +You can use variables in the configuration file to have NGINX Plus process requests differently depending on defined circumstances. Variables are named values that are calculated at runtime and are used as parameters to directives. A variable is denoted by the `$` (dollar) sign at the beginning of its name. Variables define information based upon NGINX’s state, such as the properties of the request being currently processed. 
+ +There are a number of predefined variables, such as the [core HTTP](https://nginx.org/en/docs/http/ngx_http_core_module.html#variables) variables, and you can define custom variables using the [set](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#set), [map](https://nginx.org/en/docs/http/ngx_http_map_module.html#map), and [geo](https://nginx.org/en/docs/http/ngx_http_geo_module.html#geo) directives. Most variables are computed at runtime and contain information related to a specific request. For example, `$remote_addr` contains the client IP address and `$uri` holds the current URI value. + + + +## Returning Specific Status Codes + +Some website URIs require immediate return of a response with a specific error or redirect code, for example when a page has been moved temporarily or permanently. The easiest way to do this is to use the [return](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#return) directive. For example: + +```nginx +location /wrong/url { + return 404; +} +``` + +The first parameter of `return` is a response code. The optional second parameter can be the URL of a redirect (for codes `301`, `302`, `303`, and `307`) or the text to return in the response body. For example: + +```nginx +location /permanently/moved/url { + return 301 http://www.example.com/moved/here; +} +``` + + + +The `return` directive can be included in both the `location` and `server` contexts. + + + +## Rewriting URIs in Requests + +A request URI can be modified multiple times during request processing through the use of the [rewrite](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#rewrite) directive, which has one optional and two required parameters. The first (required) parameter is the regular expression that the request URI must match. The second parameter is the URI to substitute for the matching URI. 
The optional third parameter is a flag that can halt processing of further `rewrite` directives or send a redirect (code `301` or `302`). For example:

```nginx
location /users/ {
    rewrite ^/users/(.*)$ /show?user=$1 break;
}
```

As this example shows, the second parameter (`/show?user=$1`) uses `$1` to reference the text captured through regular expression matching.

You can include multiple `rewrite` directives in both the `server` and `location` contexts. NGINX Plus executes the directives one-by-one in the order they occur. The `rewrite` directives in a `server` context are executed once when that context is selected.

After NGINX processes a set of rewriting instructions, it selects a `location` context according to the new URI. If the selected location contains `rewrite` directives, they are executed in turn. If the URI matches any of those, a search for the new location starts after all defined `rewrite` directives are processed.

The following example shows `rewrite` directives in combination with a `return` directive.

```nginx
server {
    #...
    rewrite ^(/download/.*)/media/(\w+)\.?.*$ $1/mp3/$2.mp3 last;
    rewrite ^(/download/.*)/audio/(\w+)\.?.*$ $1/mp3/$2.ra last;
    return 403;
    #...
}
```

This example configuration distinguishes between two sets of URIs. URIs such as **/download/some/media/file** are changed to **/download/some/mp3/file.mp3**. Because of the `last` flag, the subsequent directives (the second `rewrite` and the `return` directive) are skipped but NGINX Plus continues processing the request, which now has a different URI. Similarly, URIs such as **/download/some/audio/file** are replaced with **/download/some/mp3/file.ra**. If a URI doesn’t match either `rewrite` directive, NGINX Plus returns the `403` error code to the client.
+ +There are two parameters that interrupt processing of `rewrite` directives: + +- `last` – Stops execution of the `rewrite` directives in the current `server` or `location` context, but NGINX Plus searches for locations that match the rewritten URI, and any `rewrite` directives in the new location are applied (meaning the URI can be changed again). +- `break` – Like the [break](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#break) directive, stops processing of `rewrite` directives in the current context and cancels the search for locations that match the new URI. The `rewrite` directives in the new location are not executed. + + + +## Rewriting HTTP Responses + +Sometimes you need to rewrite or change the content in an HTTP response, substituting one string for another. You can use the [sub_filter](https://nginx.org/en/docs/http/ngx_http_sub_module.html#sub_filter) directive to define the rewrite to apply. The directive supports variables and chains of substitutions, making more complex changes possible. + +For example, you can change absolute links that refer to a server other than the proxy: + +```nginx +location / { + sub_filter /blog/ /blog-staging/; + sub_filter_once off; +} +``` + +Another example changes the scheme from `http://` to `https://` and replaces the `localhost` address with the hostname from the request header field. The [sub_filter_once](https://nginx.org/en/docs/http/ngx_http_sub_module.html#sub_filter_once) directive tells NGINX to apply [sub_filter](https://nginx.org/en/docs/http/ngx_http_sub_module.html#sub_filter) directives consecutively within a location: + +```nginx +location / { + sub_filter 'href="http://127.0.0.1:8080/' 'href="https://$host/'; + sub_filter 'img src="http://127.0.0.1:8080/' 'img src="https://$host/'; + sub_filter_once on; +} +``` + +Note that the part of the response already modified with the `sub_filter` is not replaced again if another `sub_filter` match occurs. 
+



## Handling Errors

With the [error_page](https://nginx.org/en/docs/http/ngx_http_core_module.html#error_page) directive, you can configure NGINX Plus to return a custom page along with an error code, substitute a different error code in the response, or redirect the browser to a different URI. In the following example, the `error_page` directive specifies the page (**/404.html**) to return with the `404` error code.

```nginx
error_page 404 /404.html;
```

Note that this directive does not mean that the error is returned immediately (the `return` directive does that), but simply specifies how to treat errors when they occur. The error code can come from a proxied server or occur during processing by NGINX Plus (for example, the `404` results when NGINX Plus can’t find the file requested by the client).

In the following example, when NGINX Plus cannot find a page, it substitutes code `301` for code `404`, and redirects the client to **http://example.com/new/path.html**. This configuration is useful when clients are still trying to access a page at its old URI. The `301` code informs the browser that the page has moved permanently, and it needs to replace the old address with the new one automatically upon return.

```nginx
location /old/path.html {
    error_page 404 =301 http://example.com/new/path.html;
}
```

The following configuration is an example of passing a request to the back end when a file is not found. Because there is no status code specified after the equals sign in the `error_page` directive, the response to the client has the status code returned by the proxied server (not necessarily `404`).

```nginx
server {
    ...
+ location /images/ { + # Set the root directory to search for the file + root /data/www; + + # Disable logging of errors related to file existence + open_file_cache_errors off; + + # Make an internal redirect if the file is not found + error_page 404 = /fetch$uri; + } + + location /fetch/ { + proxy_pass http://backend/; + } +} +``` + +The `error_page` directive instructs NGINX Plus to make an internal redirect when a file is not found. The `$uri` variable in the final parameter to the `error_page` directive holds the URI of the current request, which gets passed in the redirect. + +For example, if **/images/some/file** is not found, it is replaced with **/fetch/images/some/file** and a new search for a location starts. As a result, the request ends up in the second `location` context and is [proxied](https://www.nginx.com/resources/admin-guide/reverse-proxy/) to **"http://backend/"**. + +The [open_file_cache_errors](https://nginx.org/en/docs/http/ngx_http_core_module.html#open_file_cache_errors) directive prevents writing an error message if a file is not found. This is not necessary here since missing files are correctly handled. diff --git a/content/nginx/admin-guide/yaml/v1/nginx_api.yaml b/content/nginx/admin-guide/yaml/v1/nginx_api.yaml new file mode 100644 index 000000000..a4db0c726 --- /dev/null +++ b/content/nginx/admin-guide/yaml/v1/nginx_api.yaml @@ -0,0 +1,2977 @@ +swagger: '2.0' +info: + version: '1.0' + title: NGINX Plus REST API + description: NGINX Plus REST + [API](https://nginx.org/en/docs/http/ngx_http_api_module.html) + provides access to NGINX Plus status information, + on-the-fly configuration of upstream servers and + [key-value](https://nginx.org/en/docs/http/ngx_http_keyval_module.html) + pairs management. 
+basePath: /api/1 +tags: + - name: General Info + - name: Processes + - name: Connections + - name: SSL + - name: Slabs + - name: HTTP + - name: HTTP Requests + - name: HTTP Caches + - name: HTTP Server Zones + - name: HTTP Upstreams + - name: HTTP Keyvals + - name: Stream + - name: Stream Server Zones + - name: Stream Upstreams + - name: Method GET + - name: Method POST + - name: Method PATCH + - name: Method DELETE +schemes: + - http +paths: + /: + get: + tags: + - General Info + - Method GET + summary: Return list of root endpoints + description: Returns a list of root endpoints. + operationId: getAPIEndpoints + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + /nginx: + get: + tags: + - General Info + - Method GET + summary: Return status of nginx running instance + description: Returns nginx version, build name, address, + number of configuration reloads, IDs of master and worker processes. + operationId: getNginx + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of nginx running instance will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxObject' + /processes: + get: + tags: + - Processes + - Method GET + summary: Return nginx processes status + description: Returns the number of abnormally terminated + and respawned child processes. + operationId: getProcesses + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxProcesses' + delete: + tags: + - Processes + - Method DELETE + summary: Reset nginx processes statistics + description: Resets counters of abnormally terminated and respawned + child processes. 
+ operationId: deleteProcesses + responses: + '204': + description: Success + /connections: + get: + tags: + - Connections + - Method GET + summary: Return client connections statistics + description: Returns statistics of client connections. + operationId: getConnections + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxConnections' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the connections statistics will be output. + delete: + tags: + - Connections + - Method DELETE + summary: Reset client connections statistics + description: Resets statistics of accepted and dropped + client connections. + operationId: deleteConnections + responses: + '204': + description: Success + /ssl: + get: + tags: + - SSL + - Method GET + summary: Return SSL statistics + description: Returns SSL statistics. + operationId: getSsl + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSSLObject' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of SSL statistics will be output. + delete: + tags: + - SSL + - Method DELETE + summary: Reset SSL statistics + description: Resets counters of SSL handshakes and session reuses. + operationId: deleteSslStat + responses: + '204': + description: Success + /slabs/: + get: + tags: + - Slabs + - Method GET + summary: Return status of all slabs + description: Returns status of slabs + for each shared memory zone with slab allocator. + operationId: getSlabs + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of slab zones will be output. + If the “fields” value is empty, + then only zone names are output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZoneMap' + '/slabs/{slabZoneName}': + parameters: + - name: slabZoneName + in: path + description: The name of the shared memory zone with slab allocator. + required: true + type: string + get: + tags: + - Slabs + - Method GET + summary: Return status of a slab + description: Returns status of slabs for a particular shared memory zone + with slab allocator. + operationId: getSlabZone + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the slab zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZone' + '404': + description: Slab not found (*SlabNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Slabs + - Method DELETE + summary: Reset slab statistics + description: Resets the “reqs” and “fails” + metrics for each memory slot. + operationId: deleteSlabZoneStats + responses: + '204': + description: Success + '404': + description: Slab not found (*SlabNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/: + get: + tags: + - HTTP + - Method GET + summary: Return list of HTTP-related endpoints + description: Returns a list of first level HTTP endpoints. + operationId: getHttp + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + /http/requests: + get: + tags: + - HTTP Requests + - Method GET + summary: Return HTTP requests statistics + description: Returns status of client HTTP requests. + operationId: getHttpRequests + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of client HTTP requests statistics + will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPRequests' + delete: + tags: + - HTTP Requests + - Method DELETE + summary: Reset HTTP requests statistics + description: Resets the number of total client HTTP requests. + operationId: deleteHttpRequests + responses: + '204': + description: Success + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/server_zones/: + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of all HTTP server zones + description: Returns status information for each HTTP + [server zone](https://nginx.org/en/docs/http/ngx_http_status_module.html#status_zone). + operationId: getHttpServerZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZonesMap' + '/http/server_zones/{httpServerZoneName}': + parameters: + - name: httpServerZoneName + in: path + description: The name of an HTTP server zone. + type: string + required: true + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of an HTTP server zone + description: Returns status of a particular HTTP server zone. + operationId: getHttpServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZone' + '404': + description: Server zone not found (*ServerZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Server Zones + - Method DELETE + summary: Reset statistics for an HTTP server zone + description: Resets statistics of accepted and discarded requests, responses, + received and sent bytes in a particular HTTP server zone. + operationId: deleteHttpServerZoneStat + responses: + '204': + description: Success + '404': + description: Server zone not found (*ServerZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/caches/: + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of all caches + description: Returns status of each cache configured by + [proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) + and other “*_cache_path” directives. + operationId: getHttpCaches + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of cache zones will be output. + If the “fields” value is empty, + then only names of cache zones are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCachesMap' + '/http/caches/{httpCacheZoneName}': + parameters: + - name: httpCacheZoneName + in: path + description: The name of the cache zone. + type: string + required: true + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of a cache + description: Returns status of a particular cache. + operationId: getHttpCacheZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the cache zone will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCache' + '404': + description: Cache not found (*CacheNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Caches + - Method DELETE + summary: Reset cache statistics + description: Resets statistics of cache hits/misses in a particular cache zone. + operationId: deleteHttpCacheZoneStat + responses: + '204': + description: Success + '404': + description: Cache not found (*CacheNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/upstreams/: + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of all HTTP upstream server groups + description: Returns status of each HTTP upstream server group + and its servers. + operationId: getHttpUpstreams + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of upstream server groups will be output. + If the “fields” value is empty, + only names of upstreams are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamMap' + '/http/upstreams/{httpUpstreamName}/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an HTTP upstream server group. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of an HTTP upstream server group + description: Returns status of a particular HTTP upstream server group + and its servers. + operationId: getHttpUpstreamName + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Upstreams + - Method DELETE + summary: Reset statistics of an HTTP upstream server group + description: Resets the statistics for each upstream server + in an upstream server group and queue statistics. + operationId: deleteHttpUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/servers/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an upstream server group. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of all servers in an HTTP upstream server group + description: Returns configuration of each server + in a particular HTTP upstream server group. + operationId: getHttpUpstreamServers + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServerMap' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - HTTP Upstreams + - Method POST + summary: Add a server to an HTTP upstream server group + description: Adds a new server to an HTTP upstream server group. 
+ Server parameters are specified in the JSON format. + operationId: postHttpUpstreamServer + produces: + - application/json + parameters: + - in: body + name: postHttpUpstreamServer + description: Address of a new server and other optional parameters + in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. + required: true + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + responses: + '201': + description: Created + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + missing “*server*” argument (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + service upstream “*host*” may not have port (*UpstreamBadAddress*), + service upstream “*host*” requires domain name (*UpstreamBadAddress*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + route is too long (*UpstreamBadRoute*), + “*service*” is empty (*UpstreamBadService*), + no resolver defined to resolve (*UpstreamConfNoResolver*), + upstream “**name**” has no backup (*UpstreamNoBackup*), + upstream “**name**” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/servers/{httpUpstreamServerId}': + parameters: + - name: 
httpUpstreamName + in: path + description: The name of the upstream server group. + required: true + type: string + - name: httpUpstreamServerId + in: path + description: The ID of the server. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of a server in an HTTP upstream server group + description: Returns configuration of a particular server + in the HTTP upstream server group. + operationId: getHttpUpstreamPeer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - HTTP Upstreams + - Method PATCH + summary: Modify a server in an HTTP upstream server group + description: Modifies settings of a particular server + in an HTTP upstream server group. + Server parameters are specified in the JSON format. + operationId: patchHttpUpstreamPeer + produces: + - application/json + parameters: + - in: body + name: patchHttpUpstreamServer + description: Server parameters, specified in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + invalid “*server*” argument (*UpstreamBadAddress*), + invalid server ID (*UpstreamBadServerId*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + route is too long (*UpstreamBadRoute*), + “*service*” is empty (*UpstreamBadService*), + server “**ID**” address is immutable (*UpstreamServerImmutable*), + server “*ID*” weight is immutable (*UpstreamServerWeightImmutable*), + upstream “*name*” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Upstreams + - Method DELETE + summary: Remove a server from an HTTP upstream server group + description: Removes a server from an HTTP upstream server group. 
+ operationId: deleteHttpUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/keyvals/: + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from all keyval zones + description: Returns key-value pairs for each keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + operationId: getHttpKeyvalZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: If the “fields” value is empty, + then only keyval zone names are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZonesMap' + '/http/keyvals/{httpKeyvalZoneName}': + parameters: + - name: httpKeyvalZoneName + in: path + description: The name of a keyval shared memory zone. + required: true + type: string + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from a keyval zone + description: Returns key-value pairs stored in a particular keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + operationId: getHttpKeyvalZoneKeysValues + produces: + - application/json + parameters: + - name: key + in: query + type: string + description: Get a particular key-value pair from the keyval zone. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZone' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - HTTP Keyvals + - Method POST + summary: Add a key-value pair to the keyval zone + description: Adds a new key-value pair to the keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + Several key-value pairs can be entered + if the keyval shared memory zone is empty. + operationId: postHttpKeyvalZoneData + produces: + - application/json + parameters: + - in: body + name: Key-value + description: A key-value pair is specified in the JSON format. + Several key-value pairs can be entered + if the keyval shared memory zone is empty. + required: true + schema: + $ref: '#/definitions/NginxHTTPKeyvalZone' + responses: + '201': + description: Created + '400': + description: | + Key required (*KeyvalFormatError*), + only one key can be added (*KeyvalFormatError*), + nested object or list (*KeyvalFormatError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Keyval not found (*KeyvalNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '409': + description: Key already exists (*KeyvalKeyExists*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - HTTP Keyvals + - Method PATCH + summary: Modify a key-value or delete a key + description: Changes the value of the selected key in the key-value pair + or deletes a key by setting the key value to null. 
+ operationId: patchHttpKeyvalZoneKeyValue + produces: + - application/json + parameters: + - in: body + name: httpKeyvalZoneKeyValue + description: A new value for the key is specified in the JSON format. + required: true + schema: + $ref: '#/definitions/NginxHTTPKeyvalZone' + responses: + '204': + description: Success + '400': + description: | + Key required (*KeyvalFormatError*), + only one key can be updated (*KeyvalFormatError*), + nested object or list (*KeyvalFormatError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Keyvals + - Method DELETE + summary: Empty the keyval zone + description: Deletes all key-value pairs from the keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + operationId: deleteHttpKeyvalZoneData + responses: + '204': + description: Success + '404': + description: Keyval not found (*KeyvalNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/: + get: + tags: + - Stream + summary: Return list of stream-related endpoints + description: Returns a list of first level stream endpoints. + operationId: getStream + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + /stream/server_zones/: + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of all stream server zones + description: Returns status information for each stream + [server zone](https://nginx.org/en/docs/http/ngx_http_status_module.html#status_zone). 
+ operationId: getStreamServerZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZonesMap' + '404': + description: > + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) + not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '/stream/server_zones/{streamServerZoneName}': + parameters: + - name: streamServerZoneName + in: path + description: The name of a stream server zone. + type: string + required: true + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of a stream server zone + description: Returns status of a particular stream server zone. + operationId: getStreamServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZone' + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Server Zones + - Method DELETE + summary: Reset statistics for a stream server zone + description: Resets statistics of accepted and discarded connections, sessions, + received and sent bytes in a particular stream server zone. 
+ operationId: deleteStreamServerZoneStat + responses: + '204': + description: Success + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/upstreams/: + get: + tags: + - Stream Upstreams + - Method GET + summary: Return status of all stream upstream server groups + description: Returns status of each stream upstream server group + and its servers. + operationId: getStreamUpstreams + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of upstream server groups will be output. + If the “fields” value is empty, + only names of upstreams are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamMap' + '404': + description: > + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) + not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/': + parameters: + - name: streamUpstreamName + in: path + description: The name of a stream upstream server group. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return status of a stream upstream server group + description: Returns status of a particular stream upstream server group + and its servers. + operationId: getStreamUpstream + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Upstreams + - Method DELETE + summary: Reset statistics of a stream upstream server group + description: Resets the statistics for each upstream server + in an upstream server group. + operationId: deleteStreamUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/servers/': + parameters: + - name: streamUpstreamName + in: path + description: The name of an upstream server group. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of all servers in a stream upstream server group + description: Returns configuration of each server + in a particular stream upstream server group. 
+ operationId: getStreamUpstreamServers + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - Stream Upstreams + - Method POST + summary: Add a server to a stream upstream server group + description: Adds a new server to a stream upstream server group. + Server parameters are specified in the JSON format. + operationId: postStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: postStreamUpstreamServer + description: Address of a new server and other optional parameters + in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + responses: + '201': + description: Created + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + missing “*server*” argument (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + no port in server “*host*” (*UpstreamBadAddress*), + service upstream “*host*” may not have port (*UpstreamBadAddress*), + service upstream “*host*” requires domain name (*UpstreamBadAddress*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + “*service*” is empty (*UpstreamBadService*), + no resolver defined to resolve (*UpstreamConfNoResolver*), + upstream “**name**” has no backup (*UpstreamNoBackup*), + upstream “**name**” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/servers/{streamUpstreamServerId}': + parameters: + - name: streamUpstreamName + in: path + description: The name of the upstream server group. + required: true + type: string + - name: streamUpstreamServerId + in: path + description: The ID of the server. 
+ required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of a server in a stream upstream server group + description: Returns configuration of a particular server + in the stream upstream server group. + operationId: getStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - Stream Upstreams + - Method PATCH + summary: Modify a server in a stream upstream server group + description: Modifies settings of a particular server + in a stream upstream server group. + Server parameters are specified in the JSON format. + operationId: patchStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: patchStreamUpstreamServer + description: Server parameters, specified in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + invalid “*server*” argument (*UpstreamBadAddress*), + no port in server “*host*” (*UpstreamBadAddress*), + invalid server ID (*UpstreamBadServerId*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + “*service*” is empty (*UpstreamBadService*), + server “**ID**” address is immutable (*UpstreamServerImmutable*), + server “**ID**” weight is immutable (*UpstreamServerWeightImmutable*), + upstream “*name*” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Upstreams + - Method DELETE + summary: Remove a server from a stream upstream server group + description: Removes a server from a stream server group. 
+ operationId: deleteStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' +### +###DEFINITIONS +### +definitions: + ArrayOfStrings: + title: Array + description: | + An array of strings. + type: array + items: + type: string + NginxObject: + title: nginx + description: | + General information about nginx: + type: object + properties: + version: + type: string + description: Version of nginx. + build: + type: string + description: Name of nginx build. + address: + type: string + description: The address of the server that accepted status request. + generation: + type: integer + description: The total number of configuration + reloads. + load_timestamp: + type: string + format: date-time + description: Time of the last reload of configuration, + in milliseconds since Epoch. + timestamp: + type: string + format: date-time + description: Current time in milliseconds since Epoch. + pid: + type: integer + description: The ID of the worker process that handled status request. + ppid: + type: integer + description: The ID of the master process that started the + worker process. 
+ example: + nginx: + version: 1.13.3 + build: nginx-plus-r12-p3 + address: 206.251.255.64 + generation: 2 + load_timestamp: 2017-07-07T11:09:21.594Z + timestamp: 2017-07-11T09:31:13.477Z + pid: 32212 + ppid: 32210 + NginxProcesses: + title: Processes + type: object + properties: + respawned: + type: integer + description: The total number of abnormally terminated + and respawned child processes. + example: + respawned: 0 + NginxConnections: + title: Connections + description: | + The number of accepted, dropped, active, and idle connections. + type: object + properties: + accepted: + type: integer + description: The total number of accepted client connections. + dropped: + type: integer + description: The total number of dropped client connections. + active: + type: integer + description: The current number of active client connections. + idle: + type: integer + description: The current number of idle client connections. + example: + accepted: 4968119 + dropped: 0 + active: 5 + idle: 117 + NginxSSLObject: + title: SSL + type: object + properties: + handshakes: + type: integer + description: The total number of successful SSL handshakes. + handshakes_failed: + type: integer + description: The total number of failed SSL handshakes. + session_reuses: + type: integer + description: The total number of session reuses during SSL handshake. + example: + handshakes: 79572 + handshakes_failed: 21025 + session_reuses: 15762 + NginxSlabZoneMap: + title: Slab Zones + description: | + Status zones that use slab allocator. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxSlabZone' + example: + http_cache: + pages: + used: 2 + free: 2452 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 1 + free: 126 + reqs: 1 + fails: 0 + 64: + used: 2 + free: 62 + reqs: 2 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + dns-backends: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZone: + title: Shared memory zone with slab allocator + description: | + type: object + properties: + pages: + type: object + description: The number of free and used memory pages. + properties: + used: + type: integer + description: The current number of used memory pages. + free: + type: integer + description: The current number of free memory pages. + slots: + type: object + title: Memory Slots + description: Status data for memory slots (8, 16, 32, 64, 128, etc.) + additionalProperties: + $ref: '#/definitions/NginxSlabZoneSlot' + example: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZoneSlot: + title: Memory Slot + type: object + properties: + used: + type: integer + description: The current number of used memory slots. + free: + type: integer + description: The current number of free memory slots. + reqs: + type: integer + description: The total number of attempts + to allocate memory of specified size. 
+ fails: + type: integer + description: The number of unsuccessful attempts + to allocate memory of specified size. + NginxHTTPRequests: + title: HTTP Requests + type: object + properties: + total: + type: integer + description: The total number of client requests. + current: + type: integer + description: The current number of client requests. + example: + total: 10624511 + current: 4 + NginxHTTPServerZonesMap: + title: HTTP Server Zones + description: | + Status data for all HTTP + status zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPServerZone' + example: + site1: + processing: 2 + requests: 736395 + responses: + 1xx: 0 + 2xx: 727290 + 3xx: 4614 + 4xx: 934 + 5xx: 1535 + total: 734373 + discarded: 2020 + received: 180157219 + sent: 20183175459 + site2: + processing: 1 + requests: 185307 + responses: + 1xx: 0 + 2xx: 112674 + 3xx: 45383 + 4xx: 2504 + 5xx: 4419 + total: 164980 + discarded: 20326 + received: 51575327 + sent: 2983241510 + NginxHTTPServerZone: + title: HTTP Server Zone + type: object + properties: + processing: + type: integer + description: The number of client requests + that are currently being processed. + requests: + type: integer + description: The total number of client requests received from clients. + responses: + description: The total number of responses sent to clients and the + number of responses with status codes + “1xx”, “2xx”, “3xx”, + “4xx”, and “5xx”. + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. 
+ readOnly: true + total: + type: integer + description: The total number of responses sent to clients. + readOnly: true + discarded: + type: integer + description: The total number of + requests completed without sending a response. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + processing: 1 + requests: 706690 + responses: + 1xx: 0 + 2xx: 699482 + 3xx: 4522 + 4xx: 907 + 5xx: 266 + total: 705177 + discarded: 1513 + received: 172711587 + sent: 19415530115 + NginxHTTPCachesMap: + title: HTTP Caches + description: | + Status information of all HTTP caches configured by + proxy_cache_path + and other “*_cache_path” directives. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPCache' + example: + http-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + frontend-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + NginxHTTPCache: + title: HTTP Cache + type: object + properties: + size: + type: integer + description: The current size of the cache. 
+ max_size: + type: integer + description: The limit on the maximum size of the cache + specified in the configuration. + cold: + type: boolean + description: A boolean value indicating whether the “cache loader” process + is still loading data from disk into the cache. + hit: + type: object + properties: + responses: + type: integer + description: The total number of + valid + responses read from the cache. + bytes: + type: integer + description: The total number of bytes read from the cache. + stale: + type: object + properties: + responses: + type: integer + description: The total number of expired responses read from the cache (see + proxy_cache_use_stale + and other “*_cache_use_stale” directives). + bytes: + type: integer + description: The total number of bytes read from the cache. + updating: + type: object + properties: + responses: + type: integer + description: The total number of expired responses read from the cache + while responses were being updated (see + proxy_cache_use_stale + and other “*_cache_use_stale” directives). + bytes: + type: integer + description: The total number of bytes read from the cache. + revalidated: + type: object + properties: + responses: + type: integer + description: The total number of expired and revalidated responses + read from the cache (see + proxy_cache_revalidate + and other “*_cache_revalidate” directives. + bytes: + type: integer + description: The total number of bytes read from the cache. + miss: + type: object + properties: + responses: + type: integer + description: The total number of responses not found in the cache. + bytes: + type: integer + description: The total number of bytes read from the proxied server. + responses_written: + type: integer + description: The total number of responses written to the cache. + bytes_written: + type: integer + description: The total number of bytes written to the cache. 
+ expired: + type: object + properties: + responses: + type: integer + description: The total number of expired responses not taken from the cache. + bytes: + type: integer + description: The total number of bytes read from the proxied server. + responses_written: + type: integer + description: The total number of responses written to the cache. + bytes_written: + type: integer + description: The total number of bytes written to the cache. + bypass: + type: object + properties: + responses: + type: integer + description: The total number of responses not looked up in the cache due to the + proxy_cache_bypass + and other “*_cache_bypass” directives. + bytes: + type: integer + description: The total number of bytes read from the proxied server. + responses_written: + type: integer + description: The total number of responses written to the cache. + bytes_written: + type: integer + description: The total number of bytes written to the cache. + example: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + NginxHTTPUpstreamMap: + title: HTTP Upstreams + description: | + Status information of all HTTP + dynamically configurable + groups. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPUpstream' + example: + trac-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + keepalive: 0 + zombies: 0 + zone: trac-backend + hg-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + keepalive: 0 
+ zombies: 0 + zone: hg-backend + NginxHTTPUpstream: + title: HTTP Upstream + type: object + properties: + peers: + $ref: '#/definitions/NginxHTTPUpstreamPeerMap' + keepalive: + type: integer + description: The current number of idle + keepalive + connections. + zombies: + type: integer + description: The current number of servers removed + from the group but still processing active client requests. + zone: + type: string + description: The name of the shared memory + zone + that keeps the group’s configuration and run-time state. + queue: + type: object + description: > + For the requests + queue, + the following data are provided: + properties: + size: + type: integer + description: The current number of requests in the queue. + max_size: + type: integer + description: The maximum number of requests that can be in the queue + at the same time. + overflows: + type: integer + description: The total number of requests rejected due to the queue overflow. + example: + upstream_backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 20 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 20 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + keepalive: 0 + zombies: 0 + zone: upstream_backend + 
NginxHTTPUpstreamPeerMap: + title: HTTP Upstream Servers + description: | + An array of HTTP + upstream servers. + type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamPeer' + NginxHTTPUpstreamPeer: + title: HTTP Upstream Server + type: object + properties: + id: + type: integer + description: The ID of the server. + readOnly: true + server: + type: string + description: An address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. + name: + type: string + description: The name of the server specified in the + server + directive. + readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. + state: + type: string + enum: + - up + - draining + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “draining”, “down”, + “unavail”, “checking”, + and “unhealthy”. + active: + type: integer + description: The current number of active connections. + readOnly: true + max_conns: + type: integer + description: The + max_conns + limit for the server. + requests: + type: integer + description: The total number of client requests forwarded to this server. + readOnly: true + responses: + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. 
+ readOnly: true + total: + type: integer + description: The total number of responses obtained from this server. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. + readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client requests + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. + readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. + fails: + type: integer + description: The number of failed health checks. + unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). + last_passed: + type: boolean + description: Boolean indicating if the last health check request was successful + and passed + tests. + downtime: + type: integer + readOnly: true + description: Total time the server was in the “unavail”, + “checking”, and “unhealthy” states. + downstart: + type: string + format: date-time + readOnly: true + description: The time (in milliseconds since Epoch) when the server became + “unavail”, “checking”, + or “unhealthy”. + selected: + type: string + format: date-time + readOnly: true + description: The time (in milliseconds since Epoch) + when the server was last selected to process a request. + header_time: + type: integer + readOnly: true + description: The average time to get the + response header + from the server. + response_time: + type: integer + readOnly: true + description: The average time to get the + full response + from the server. 
+ NginxHTTPUpstreamConfServerMap: + title: HTTP Upstream Servers + description: An array of HTTP upstream servers for dynamic configuration. + type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:8088 + weight: 1 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: false + down: false + - id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPUpstreamConfServer: + title: HTTP Upstream Server + description: | + Dynamically configurable parameters of an HTTP upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the HTTP upstream server. + The ID is assigned automatically and cannot be changed. + readOnly: true + server: + type: string + description: Same as the + address + parameter of the HTTP upstream server. + When adding a server, it is possible to specify it as a domain name. + In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “http” block. + See also the + resolve + parameter of the HTTP upstream server. + service: + type: string + description: Same as the + service + parameter of the HTTP upstream server. + This parameter cannot be changed. + readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the HTTP upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the HTTP upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the HTTP upstream server. + fail_timeout: + type: integer + description: Same as the + fail_timeout + parameter of the HTTP upstream server. 
+ slow_start: + type: integer + description: Same as the + slow_start + parameter of the HTTP upstream server. + route: + type: string + description: Same as the + route + parameter of the HTTP upstream server. + backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the HTTP upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. + readOnly: true + drain: + type: boolean + description: Puts the HTTP upstream server into the “draining” mode. + In this mode, only requests + bound + to the server will be proxied to it. + The parameter cannot be initially set, + it can only be changed with the PATCH method. + example: + id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPKeyvalZonesMap: + title: Keyval Shared Memory Zones + description: | + Contents of all keyval shared memory zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPKeyvalZone' + example: + keyval_zone: + key1: value1 + key2: value2 + key3: value3 + one: + arg1: value1 + arg2: value2 + arg3: value3 + NginxHTTPKeyvalZone: + title: Keyval Shared Memory Zone + description: | + Contents of a keyval shared memory zone. + type: object + example: + key1: value1 + key2: value2 + key3: value3 + NginxStreamServerZonesMap: + title: Stream Server Zones + description: | + Status information for all stream + status zones. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxStreamServerZone' + example: + mysql-frontend: + processing: 2 + connections: 270925 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 270925 + discarded: 0 + received: 28988975 + sent: 3879346317 + dns: + processing: 1 + connections: 155569 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 155569 + discarded: 0 + received: 4200363 + sent: 20489184 + NginxStreamServerZone: + title: Stream Server Zone + type: object + properties: + processing: + type: integer + description: The number of client connections + that are currently being processed. + connections: + type: integer + description: The total number of connections accepted from clients. + sessions: + type: object + description: The total number of completed sessions, + and the number of sessions completed with status codes + “2xx”, “4xx”, or “5xx”. + properties: + 2xx: + type: integer + description: The total number of sessions completed with + status codes + “2xx”. + 4xx: + type: integer + description: The total number of sessions completed with + status codes + “4xx”. + 5xx: + type: integer + description: The total number of sessions completed with + status codes + “5xx”. + total: + type: integer + description: The total number of completed client sessions. + discarded: + type: integer + description: The total number of + connections completed without creating a session. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + dns: + processing: 1 + connections: 155569 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 155569 + discarded: 0 + received: 4200363 + sent: 20489184 + NginxStreamUpstreamMap: + title: Stream Upstreams + description: Status information of stream upstream server groups. 
+ type: object
+ additionalProperties:
+ $ref: '#/definitions/NginxStreamUpstream'
+ example:
+ mysql_backends:
+ peers:
+ - id: 0
+ server: 10.0.0.1:12345
+ name: 10.0.0.1:12345
+ backup: false
+ weight: 5
+ state: up
+ active: 0
+ max_conns: 30
+ connections: 1231
+ sent: 251946292
+ received: 19222475454
+ fails: 0
+ unavail: 0
+ health_checks:
+ checks: 26214
+ fails: 0
+ unhealthy: 0
+ last_passed: true
+ downtime: 0
+ downstart: 2017-07-07T11:09:21.602Z
+ selected: 2017-07-17T15:01:25Z
+ - id: 1
+ server: 10.0.0.1:12346
+ name: 10.0.0.1:12346
+ backup: true
+ weight: 1
+ state: unhealthy
+ active: 0
+ max_conns: 30
+ connections: 0
+ sent: 0
+ received: 0
+ fails: 0
+ unavail: 0
+ health_checks:
+ checks: 26284
+ fails: 26284
+ unhealthy: 1
+ last_passed: false
+ downtime: 262925617
+ downstart: 2017-07-07T11:09:21.602Z
+ selected: 2017-07-17T15:01:25Z
+ zombies: 0
+ zone: mysql_backends
+ dns:
+ peers:
+ - id: 0
+ server: 10.0.0.1:12347
+ name: 10.0.0.1:12347
+ backup: false
+ weight: 5
+ state: up
+ active: 0
+ max_conns: 30
+ connections: 667231
+ sent: 251946292
+ received: 19222475454
+ fails: 0
+ unavail: 0
+ health_checks:
+ checks: 26214
+ fails: 0
+ unhealthy: 0
+ last_passed: true
+ downtime: 0
+ downstart: 2017-07-07T11:09:21.602Z
+ selected: 2017-07-17T15:01:25Z
+ - id: 1
+ server: 10.0.0.1:12348
+ name: 10.0.0.1:12348
+ backup: true
+ weight: 1
+ state: unhealthy
+ active: 0
+ connections: 0
+ max_conns: 30
+ sent: 0
+ received: 0
+ fails: 0
+ unavail: 0
+ health_checks:
+ checks: 26284
+ fails: 26284
+ unhealthy: 1
+ last_passed: false
+ downtime: 262925617
+ downstart: 2017-07-07T11:09:21.602Z
+ selected: 2017-07-17T15:01:25Z
+ zombies: 0
+ zone: dns
+ NginxStreamUpstream:
+ title: Stream Upstream
+ type: object
+ properties:
+ peers:
+ $ref: '#/definitions/NginxStreamUpstreamPeerMap'
+ zombies:
+ type: integer
+ description: The current number of servers removed from the group
+ but still processing active client connections.
+ zone: + type: string + description: The name of the shared memory + zone + that keeps the group’s configuration and run-time state. + example: + dns: + peers: + - id: 0 + server: 10.0.0.1:12347 + name: 10.0.0.1:12347 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 50 + connections: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:12348 + name: 10.0.0.1:12348 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 50 + connections: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + zombies: 0 + zone: dns + NginxStreamUpstreamPeerMap: + title: Stream Upstream Servers + description: Array of stream upstream servers. + type: array + items: + $ref: '#/definitions/NginxStreamUpstreamPeer' + NginxStreamUpstreamPeer: + title: Stream Upstream Server + type: object + properties: + id: + type: integer + description: The ID of the server. + readOnly: true + server: + type: string + description: An + address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. + name: + type: string + format: hostname + description: The name of the server specified in the + server + directive. + readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. + state: + type: string + readOnly: true + enum: + - up + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “down”, “unavail”, + “checking”, or “unhealthy”. 
+ active: + type: integer + description: The current number of connections. + readOnly: true + max_conns: + type: integer + description: The + max_conns + limit for the server. + connections: + type: integer + description: The total number of client connections forwarded to this server. + readOnly: true + connect_time: + type: integer + description: The average time to connect to the upstream server. + readOnly: true + first_byte_time: + type: integer + description: The average time to receive the first byte of data. + readOnly: true + response_time: + type: integer + description: The average time to receive the last byte of data. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. + readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client connections + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. + readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. + readOnly: true + fails: + type: integer + description: The number of failed health checks. + readOnly: true + unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). + readOnly: true + last_passed: + type: boolean + description: Boolean indicating whether the last health check request + was successful and passed + tests. + readOnly: true + downtime: + type: integer + description: Total time the server was in the + “unavail”, “checking”, + and “unhealthy” states. 
+ readOnly: true + downstart: + type: string + format: date-time + description: The time (in milliseconds since Epoch) when the server became + “unavail”, “checking”, + or “unhealthy”. + readOnly: true + selected: + type: string + format: date-time + description: The time (in milliseconds since Epoch) + when the server was last selected to process a connection. + readOnly: true + NginxStreamUpstreamConfServerMap: + title: Stream Upstream Servers + description: | + An array of stream upstream servers for dynamic configuration. + type: array + items: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + - id: 1 + server: 10.0.0.1:12349 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxStreamUpstreamConfServer: + title: Stream Upstream Server + description: | + Dynamically configurable parameters of a stream upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the stream upstream server. + The ID is assigned automatically and cannot be changed. + readOnly: true + server: + type: string + description: Same as the + address + parameter of the stream upstream server. + When adding a server, it is possible to specify it as a domain name. + In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “stream” block. + See also the + resolve + parameter of the stream upstream server. + service: + type: string + description: Same as the + service + parameter of the stream upstream server. + This parameter cannot be changed. 
+ readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the stream upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the stream upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the stream upstream server. + fail_timeout: + type: integer + description: Same as the + fail_timeout + parameter of the stream upstream server. + slow_start: + type: integer + description: Same as the + slow_start + parameter of the stream upstream server. + backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the stream upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. + readOnly: true + example: + id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxError: + title: Error + description: | + nginx error object. + type: object + properties: + path: + type: string + description: API path. + method: + type: string + description: HTTP method. + error: + type: object + properties: + status: + type: string + description: HTTP error code. + text: + type: string + description: Error description. + code: + type: string + description: Internal nginx error code. + request_id: + type: string + description: The ID of the request, equals the value of the + $request_id + variable. + href: + type: string + description: Link to reference documentation. 
diff --git a/content/nginx/admin-guide/yaml/v2/nginx_api.yaml b/content/nginx/admin-guide/yaml/v2/nginx_api.yaml new file mode 100644 index 000000000..c95eb947f --- /dev/null +++ b/content/nginx/admin-guide/yaml/v2/nginx_api.yaml @@ -0,0 +1,3174 @@ +swagger: '2.0' +info: + version: '2.0' + title: NGINX Plus REST API + description: NGINX Plus REST + [API](https://nginx.org/en/docs/http/ngx_http_api_module.html) + provides access to NGINX Plus status information, + on-the-fly configuration of upstream servers and + key-value pairs management for + [http](https://nginx.org/en/docs/http/ngx_http_keyval_module.html) and + [stream](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html). +basePath: /api/2 +tags: + - name: General Info + - name: Processes + - name: Connections + - name: SSL + - name: Slabs + - name: HTTP + - name: HTTP Requests + - name: HTTP Caches + - name: HTTP Server Zones + - name: HTTP Upstreams + - name: HTTP Keyvals + - name: Stream + - name: Stream Server Zones + - name: Stream Upstreams + - name: Stream Keyvals + - name: Method GET + - name: Method POST + - name: Method PATCH + - name: Method DELETE +schemes: + - http +paths: + /: + get: + tags: + - General Info + - Method GET + summary: Return list of root endpoints + description: Returns a list of root endpoints. + operationId: getAPIEndpoints + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + /nginx: + get: + tags: + - General Info + - Method GET + summary: Return status of nginx running instance + description: Returns nginx version, build name, address, + number of configuration reloads, IDs of master and worker processes. + operationId: getNginx + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of nginx running instance will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxObject' + /processes: + get: + tags: + - Processes + - Method GET + summary: Return nginx processes status + description: Returns the number of abnormally terminated + and respawned child processes. + operationId: getProcesses + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxProcesses' + delete: + tags: + - Processes + - Method DELETE + summary: Reset nginx processes statistics + description: Resets counters of abnormally terminated and respawned + child processes. + operationId: deleteProcesses + responses: + '204': + description: Success + /connections: + get: + tags: + - Connections + - Method GET + summary: Return client connections statistics + description: Returns statistics of client connections. + operationId: getConnections + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxConnections' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the connections statistics will be output. + delete: + tags: + - Connections + - Method DELETE + summary: Reset client connections statistics + description: Resets statistics of accepted and dropped + client connections. + operationId: deleteConnections + responses: + '204': + description: Success + /ssl: + get: + tags: + - SSL + - Method GET + summary: Return SSL statistics + description: Returns SSL statistics. + operationId: getSsl + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSSLObject' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of SSL statistics will be output. + delete: + tags: + - SSL + - Method DELETE + summary: Reset SSL statistics + description: Resets counters of SSL handshakes and session reuses. 
+ operationId: deleteSslStat + responses: + '204': + description: Success + /slabs/: + get: + tags: + - Slabs + - Method GET + summary: Return status of all slabs + description: Returns status of slabs + for each shared memory zone with slab allocator. + operationId: getSlabs + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of slab zones will be output. + If the “fields” value is empty, + then only zone names are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZoneMap' + '/slabs/{slabZoneName}': + parameters: + - name: slabZoneName + in: path + description: The name of the shared memory zone with slab allocator. + required: true + type: string + get: + tags: + - Slabs + - Method GET + summary: Return status of a slab + description: Returns status of slabs for a particular shared memory zone + with slab allocator. + operationId: getSlabZone + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the slab zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZone' + '404': + description: Slab not found (*SlabNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Slabs + - Method DELETE + summary: Reset slab statistics + description: Resets the “reqs” and “fails” + metrics for each memory slot. + operationId: deleteSlabZoneStats + responses: + '204': + description: Success + '404': + description: Slab not found (*SlabNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/: + get: + tags: + - HTTP + - Method GET + summary: Return list of HTTP-related endpoints + description: Returns a list of first level HTTP endpoints. 
+ operationId: getHttp + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + /http/requests: + get: + tags: + - HTTP Requests + - Method GET + summary: Return HTTP requests statistics + description: Returns status of client HTTP requests. + operationId: getHttpRequests + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of client HTTP requests statistics + will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPRequests' + delete: + tags: + - HTTP Requests + - Method DELETE + summary: Reset HTTP requests statistics + description: Resets the number of total client HTTP requests. + operationId: deleteHttpRequests + responses: + '204': + description: Success + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/server_zones/: + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of all HTTP server zones + description: Returns status information for each HTTP + [server zone](https://nginx.org/en/docs/http/ngx_http_status_module.html#status_zone). + operationId: getHttpServerZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZonesMap' + '/http/server_zones/{httpServerZoneName}': + parameters: + - name: httpServerZoneName + in: path + description: The name of an HTTP server zone. + type: string + required: true + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of an HTTP server zone + description: Returns status of a particular HTTP server zone. 
+ operationId: getHttpServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZone' + '404': + description: Server zone not found (*ServerZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Server Zones + - Method DELETE + summary: Reset statistics for an HTTP server zone + description: Resets statistics of accepted and discarded requests, responses, + received and sent bytes in a particular HTTP server zone. + operationId: deleteHttpServerZoneStat + responses: + '204': + description: Success + '404': + description: Server zone not found (*ServerZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/caches/: + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of all caches + description: Returns status of each cache configured by + [proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) + and other “*_cache_path” directives. + operationId: getHttpCaches + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of cache zones will be output. + If the “fields” value is empty, + then only names of cache zones are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCachesMap' + '/http/caches/{httpCacheZoneName}': + parameters: + - name: httpCacheZoneName + in: path + description: The name of the cache zone. + type: string + required: true + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of a cache + description: Returns status of a particular cache. 
+ operationId: getHttpCacheZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the cache zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCache' + '404': + description: Cache not found (*CacheNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Caches + - Method DELETE + summary: Reset cache statistics + description: Resets statistics of cache hits/misses in a particular cache zone. + operationId: deleteHttpCacheZoneStat + responses: + '204': + description: Success + '404': + description: Cache not found (*CacheNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/upstreams/: + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of all HTTP upstream server groups + description: Returns status of each HTTP upstream server group + and its servers. + operationId: getHttpUpstreams + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of upstream server groups will be output. + If the “fields” value is empty, + only names of upstreams are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamMap' + '/http/upstreams/{httpUpstreamName}/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an HTTP upstream server group. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of an HTTP upstream server group + description: Returns status of a particular HTTP upstream server group + and its servers. 
+ operationId: getHttpUpstreamName + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Upstreams + - Method DELETE + summary: Reset statistics of an HTTP upstream server group + description: Resets the statistics for each upstream server + in an upstream server group and queue statistics. + operationId: deleteHttpUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/servers/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an upstream server group. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of all servers in an HTTP upstream server group + description: Returns configuration of each server + in a particular HTTP upstream server group. 
+ operationId: getHttpUpstreamServers + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServerMap' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - HTTP Upstreams + - Method POST + summary: Add a server to an HTTP upstream server group + description: Adds a new server to an HTTP upstream server group. + Server parameters are specified in the JSON format. + operationId: postHttpUpstreamServer + produces: + - application/json + parameters: + - in: body + name: postHttpUpstreamServer + description: Address of a new server and other optional parameters + in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. + required: true + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + responses: + '201': + description: Created + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + missing “*server*” argument (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + service upstream “*host*” may not have port (*UpstreamBadAddress*), + service upstream “*host*” requires domain name (*UpstreamBadAddress*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + route is too long (*UpstreamBadRoute*), + “*service*” is empty (*UpstreamBadService*), + no resolver defined to resolve (*UpstreamConfNoResolver*), + 
upstream “**name**” has no backup (*UpstreamNoBackup*), + upstream “**name**” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/servers/{httpUpstreamServerId}': + parameters: + - name: httpUpstreamName + in: path + description: The name of the upstream server group. + required: true + type: string + - name: httpUpstreamServerId + in: path + description: The ID of the server. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of a server in an HTTP upstream server group + description: Returns configuration of a particular server + in the HTTP upstream server group. + operationId: getHttpUpstreamPeer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - HTTP Upstreams + - Method PATCH + summary: Modify a server in an HTTP upstream server group + description: Modifies settings of a particular server + in an HTTP upstream server group. + Server parameters are specified in the JSON format. + operationId: patchHttpUpstreamPeer + produces: + - application/json + parameters: + - in: body + name: patchHttpUpstreamServer + description: Server parameters, specified in the JSON format. 
+ The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. + required: true + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + invalid “*server*” argument (*UpstreamBadAddress*), + invalid server ID (*UpstreamBadServerId*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + route is too long (*UpstreamBadRoute*), + “*service*” is empty (*UpstreamBadService*), + server “**ID**” address is immutable (*UpstreamServerImmutable*), + server “*ID*” weight is immutable (*UpstreamServerWeightImmutable*), + upstream “*name*” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Upstreams + - Method DELETE + summary: Remove a server from an HTTP upstream server group + description: Removes a server from an HTTP upstream server group. 
+ operationId: deleteHttpUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/keyvals/: + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from all HTTP keyval zones + description: Returns key-value pairs for each HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + operationId: getHttpKeyvalZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: If the “fields” value is empty, + then only HTTP keyval zone names are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZonesMap' + '/http/keyvals/{httpKeyvalZoneName}': + parameters: + - name: httpKeyvalZoneName + in: path + description: The name of an HTTP keyval shared memory zone. + required: true + type: string + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from an HTTP keyval zone + description: Returns key-value pairs stored in a particular HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + operationId: getHttpKeyvalZoneKeysValues + produces: + - application/json + parameters: + - name: key + in: query + type: string + description: Get a particular key-value pair from the HTTP keyval zone. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZone' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - HTTP Keyvals + - Method POST + summary: Add a key-value pair to the HTTP keyval zone + description: Adds a new key-value pair to the HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + Several key-value pairs can be entered + if the HTTP keyval shared memory zone is empty. + operationId: postHttpKeyvalZoneData + produces: + - application/json + parameters: + - in: body + name: Key-value + description: A key-value pair is specified in the JSON format. + Several key-value pairs can be entered + if the HTTP keyval shared memory zone is empty. + required: true + schema: + $ref: '#/definitions/NginxHTTPKeyvalZone' + responses: + '201': + description: Created + '400': + description: | + Key required (*KeyvalFormatError*), + only one key can be added (*KeyvalFormatError*), + nested object or list (*KeyvalFormatError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Keyval not found (*KeyvalNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '409': + description: Key already exists (*KeyvalKeyExists*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - HTTP Keyvals + - Method PATCH + summary: Modify a key-value or delete a key + description: Changes the value of the selected key in the key-value pair + or deletes a key by setting the key value to null. 
+ operationId: patchHttpKeyvalZoneKeyValue + produces: + - application/json + parameters: + - in: body + name: httpKeyvalZoneKeyValue + description: A new value for the key is specified in the JSON format. + required: true + schema: + $ref: '#/definitions/NginxHTTPKeyvalZone' + responses: + '204': + description: Success + '400': + description: | + Key required (*KeyvalFormatError*), + only one key can be updated (*KeyvalFormatError*), + nested object or list (*KeyvalFormatError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Keyvals + - Method DELETE + summary: Empty the HTTP keyval zone + description: Deletes all key-value pairs from the HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + operationId: deleteHttpKeyvalZoneData + responses: + '204': + description: Success + '404': + description: Keyval not found (*KeyvalNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/: + get: + tags: + - Stream + summary: Return list of stream-related endpoints + description: Returns a list of first level stream endpoints. 
+ operationId: getStream + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + /stream/server_zones/: + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of all stream server zones + description: Returns status information for each stream + [server zone](https://nginx.org/en/docs/http/ngx_http_status_module.html#status_zone). + operationId: getStreamServerZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZonesMap' + '404': + description: > + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) + not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '/stream/server_zones/{streamServerZoneName}': + parameters: + - name: streamServerZoneName + in: path + description: The name of a stream server zone. + type: string + required: true + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of a stream server zone + description: Returns status of a particular stream server zone. + operationId: getStreamServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZone' + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Server Zones + - Method DELETE + summary: Reset statistics for a stream server zone + description: Resets statistics of accepted and discarded connections, sessions, + received and sent bytes in a particular stream server zone. + operationId: deleteStreamServerZoneStat + responses: + '204': + description: Success + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/upstreams/: + get: + tags: + - Stream Upstreams + - Method GET + summary: Return status of all stream upstream server groups + description: Returns status of each stream upstream server group + and its servers. + operationId: getStreamUpstreams + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of upstream server groups will be output. + If the “fields” value is empty, + only names of upstreams are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamMap' + '404': + description: > + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) + not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/': + parameters: + - name: streamUpstreamName + in: path + description: The name of a stream upstream server group. 
+ required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return status of a stream upstream server group + description: Returns status of a particular stream upstream server group + and its servers. + operationId: getStreamUpstream + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Upstreams + - Method DELETE + summary: Reset statistics of a stream upstream server group + description: Resets the statistics for each upstream server + in an upstream server group. + operationId: deleteStreamUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/servers/': + parameters: + - name: streamUpstreamName + in: path + description: The name of an upstream server group. 
+ required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of all servers in a stream upstream server group + description: Returns configuration of each server + in a particular stream upstream server group. + operationId: getStreamUpstreamServers + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - Stream Upstreams + - Method POST + summary: Add a server to a stream upstream server group + description: Adds a new server to a stream upstream server group. + Server parameters are specified in the JSON format. + operationId: postStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: postStreamUpstreamServer + description: Address of a new server and other optional parameters + in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + responses: + '201': + description: Created + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + missing “*server*” argument (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + no port in server “*host*” (*UpstreamBadAddress*), + service upstream “*host*” may not have port (*UpstreamBadAddress*), + service upstream “*host*” requires domain name (*UpstreamBadAddress*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + “*service*” is empty (*UpstreamBadService*), + no resolver defined to resolve (*UpstreamConfNoResolver*), + upstream “**name**” has no backup (*UpstreamNoBackup*), + upstream “**name**” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/servers/{streamUpstreamServerId}': + parameters: + - name: streamUpstreamName + in: path + description: The name of the upstream server group. + required: true + type: string + - name: streamUpstreamServerId + in: path + description: The ID of the server. 
+ required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of a server in a stream upstream server group + description: Returns configuration of a particular server + in the stream upstream server group. + operationId: getStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - Stream Upstreams + - Method PATCH + summary: Modify a server in a stream upstream server group + description: Modifies settings of a particular server + in a stream upstream server group. + Server parameters are specified in the JSON format. + operationId: patchStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: patchStreamUpstreamServer + description: Server parameters, specified in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + invalid “*server*” argument (*UpstreamBadAddress*), + no port in server “*host*” (*UpstreamBadAddress*), + invalid server ID (*UpstreamBadServerId*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + “*service*” is empty (*UpstreamBadService*), + server “**ID**” address is immutable (*UpstreamServerImmutable*), + server “**ID**” weight is immutable (*UpstreamServerWeightImmutable*), + upstream “*name*” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Upstreams + - Method DELETE + summary: Remove a server from a stream upstream server group + description: Removes a server from a stream server group. 
+ operationId: deleteStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/keyvals/: + get: + tags: + - Stream Keyvals + - Method GET + summary: Return key-value pairs from all stream keyval zones + description: Returns key-value pairs for each stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + operationId: getStreamKeyvalZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: If the “fields” value is empty, + then only stream keyval zone names are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamKeyvalZonesMap' + '404': + description: | + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '/stream/keyvals/{streamKeyvalZoneName}': + parameters: + - name: streamKeyvalZoneName + in: path + description: The name of a stream keyval shared memory zone. 
+ required: true + type: string + get: + tags: + - Stream Keyvals + - Method GET + summary: Return key-value pairs from a stream keyval zone + description: Returns key-value pairs stored in a particular stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + operationId: getStreamKeyvalZoneKeysValues + produces: + - application/json + parameters: + - name: key + in: query + type: string + description: Get a particular key-value pair from the stream keyval zone. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamKeyvalZone' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - Stream Keyvals + - Method POST + summary: Add a key-value pair to the stream keyval zone + description: Adds a new key-value pair to the stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + Several key-value pairs can be entered + if the stream keyval shared memory zone is empty. + operationId: postStreamKeyvalZoneData + produces: + - application/json + parameters: + - in: body + name: Key-value + description: A key-value pair is specified in the JSON format. + Several key-value pairs can be entered + if the stream keyval shared memory zone is empty. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamKeyvalZone' + responses: + '201': + description: Created + '400': + description: | + Key required (*KeyvalFormatError*), + only one key can be added (*KeyvalFormatError*), + nested object or list (*KeyvalFormatError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '409': + description: Key already exists (*KeyvalKeyExists*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - Stream Keyvals + - Method PATCH + summary: Modify a key-value or delete a key + description: Changes the value of the selected key in the key-value pair + or deletes a key by setting the key value to null. + operationId: patchStreamKeyvalZoneKeyValue + produces: + - application/json + parameters: + - in: body + name: streamKeyvalZoneKeyValue + description: A new value for the key is specified in the JSON format. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamKeyvalZone' + responses: + '204': + description: Success + '400': + description: | + Key required (*KeyvalFormatError*), + only one key can be updated (*KeyvalFormatError*), + nested object or list (*KeyvalFormatError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Keyvals + - Method DELETE + summary: Empty the stream keyval zone + description: Deletes all key-value pairs from the stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + operationId: deleteStreamKeyvalZoneData + responses: + '204': + description: Success + '404': + description: | + Keyval not found (*KeyvalNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' +### +###DEFINITIONS +### +definitions: + ArrayOfStrings: + title: Array + description: | + An array of strings. + type: array + items: + type: string + NginxObject: + title: nginx + description: | + General information about nginx: + type: object + properties: + version: + type: string + description: Version of nginx. + build: + type: string + description: Name of nginx build. + address: + type: string + description: The address of the server that accepted status request. 
+ generation: + type: integer + description: The total number of configuration + reloads. + load_timestamp: + type: string + format: date-time + description: Time of the last reload of configuration, + in milliseconds since Epoch. + timestamp: + type: string + format: date-time + description: Current time in milliseconds since Epoch. + pid: + type: integer + description: The ID of the worker process that handled status request. + ppid: + type: integer + description: The ID of the master process that started the + worker process. + example: + nginx: + version: 1.13.3 + build: nginx-plus-r12-p3 + address: 206.251.255.64 + generation: 2 + load_timestamp: 2017-07-07T11:09:21.594Z + timestamp: 2017-07-11T09:31:13.477Z + pid: 32212 + ppid: 32210 + NginxProcesses: + title: Processes + type: object + properties: + respawned: + type: integer + description: The total number of abnormally terminated + and respawned child processes. + example: + respawned: 0 + NginxConnections: + title: Connections + description: | + The number of accepted, dropped, active, and idle connections. + type: object + properties: + accepted: + type: integer + description: The total number of accepted client connections. + dropped: + type: integer + description: The total number of dropped client connections. + active: + type: integer + description: The current number of active client connections. + idle: + type: integer + description: The current number of idle client connections. + example: + accepted: 4968119 + dropped: 0 + active: 5 + idle: 117 + NginxSSLObject: + title: SSL + type: object + properties: + handshakes: + type: integer + description: The total number of successful SSL handshakes. + handshakes_failed: + type: integer + description: The total number of failed SSL handshakes. + session_reuses: + type: integer + description: The total number of session reuses during SSL handshake. 
+ example: + handshakes: 79572 + handshakes_failed: 21025 + session_reuses: 15762 + NginxSlabZoneMap: + title: Slab Zones + description: | + Status zones that use slab allocator. + type: object + additionalProperties: + $ref: '#/definitions/NginxSlabZone' + example: + http_cache: + pages: + used: 2 + free: 2452 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 1 + free: 126 + reqs: 1 + fails: 0 + 64: + used: 2 + free: 62 + reqs: 2 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + dns-backends: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZone: + title: Shared memory zone with slab allocator + description: | + type: object + properties: + pages: + type: object + description: The number of free and used memory pages. + properties: + used: + type: integer + description: The current number of used memory pages. + free: + type: integer + description: The current number of free memory pages. + slots: + type: object + title: Memory Slots + description: Status data for memory slots (8, 16, 32, 64, 128, etc.) + additionalProperties: + $ref: '#/definitions/NginxSlabZoneSlot' + example: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZoneSlot: + title: Memory Slot + type: object + properties: + used: + type: integer + description: The current number of used memory slots. 
+ free: + type: integer + description: The current number of free memory slots. + reqs: + type: integer + description: The total number of attempts + to allocate memory of specified size. + fails: + type: integer + description: The number of unsuccessful attempts + to allocate memory of specified size. + NginxHTTPRequests: + title: HTTP Requests + type: object + properties: + total: + type: integer + description: The total number of client requests. + current: + type: integer + description: The current number of client requests. + example: + total: 10624511 + current: 4 + NginxHTTPServerZonesMap: + title: HTTP Server Zones + description: | + Status data for all HTTP + status zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPServerZone' + example: + site1: + processing: 2 + requests: 736395 + responses: + 1xx: 0 + 2xx: 727290 + 3xx: 4614 + 4xx: 934 + 5xx: 1535 + total: 734373 + discarded: 2020 + received: 180157219 + sent: 20183175459 + site2: + processing: 1 + requests: 185307 + responses: + 1xx: 0 + 2xx: 112674 + 3xx: 45383 + 4xx: 2504 + 5xx: 4419 + total: 164980 + discarded: 20326 + received: 51575327 + sent: 2983241510 + NginxHTTPServerZone: + title: HTTP Server Zone + type: object + properties: + processing: + type: integer + description: The number of client requests + that are currently being processed. + requests: + type: integer + description: The total number of client requests received from clients. + responses: + description: The total number of responses sent to clients and the + number of responses with status codes + “1xx”, “2xx”, “3xx”, + “4xx”, and “5xx”. + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. 
+ readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. + readOnly: true + total: + type: integer + description: The total number of responses sent to clients. + readOnly: true + discarded: + type: integer + description: The total number of + requests completed without sending a response. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + processing: 1 + requests: 706690 + responses: + 1xx: 0 + 2xx: 699482 + 3xx: 4522 + 4xx: 907 + 5xx: 266 + total: 705177 + discarded: 1513 + received: 172711587 + sent: 19415530115 + NginxHTTPCachesMap: + title: HTTP Caches + description: | + Status information of all HTTP caches configured by + proxy_cache_path + and other “*_cache_path” directives. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPCache' + example: + http-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + frontend-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + 
responses_written: 200173 + bytes_written: 44992 + NginxHTTPCache: + title: HTTP Cache + type: object + properties: + size: + type: integer + description: The current size of the cache. + max_size: + type: integer + description: The limit on the maximum size of the cache + specified in the configuration. + cold: + type: boolean + description: A boolean value indicating whether the “cache loader” process + is still loading data from disk into the cache. + hit: + type: object + properties: + responses: + type: integer + description: The total number of + valid + responses read from the cache. + bytes: + type: integer + description: The total number of bytes read from the cache. + stale: + type: object + properties: + responses: + type: integer + description: The total number of expired responses read from the cache (see + proxy_cache_use_stale + and other “*_cache_use_stale” directives). + bytes: + type: integer + description: The total number of bytes read from the cache. + updating: + type: object + properties: + responses: + type: integer + description: The total number of expired responses read from the cache + while responses were being updated (see + proxy_cache_use_stale + and other “*_cache_use_stale” directives). + bytes: + type: integer + description: The total number of bytes read from the cache. + revalidated: + type: object + properties: + responses: + type: integer + description: The total number of expired and revalidated responses + read from the cache (see + proxy_cache_revalidate + and other “*_cache_revalidate” directives. + bytes: + type: integer + description: The total number of bytes read from the cache. + miss: + type: object + properties: + responses: + type: integer + description: The total number of responses not found in the cache. + bytes: + type: integer + description: The total number of bytes read from the proxied server. + responses_written: + type: integer + description: The total number of responses written to the cache. 
+ bytes_written: + type: integer + description: The total number of bytes written to the cache. + expired: + type: object + properties: + responses: + type: integer + description: The total number of expired responses not taken from the cache. + bytes: + type: integer + description: The total number of bytes read from the proxied server. + responses_written: + type: integer + description: The total number of responses written to the cache. + bytes_written: + type: integer + description: The total number of bytes written to the cache. + bypass: + type: object + properties: + responses: + type: integer + description: The total number of responses not looked up in the cache due to the + proxy_cache_bypass + and other “*_cache_bypass” directives. + bytes: + type: integer + description: The total number of bytes read from the proxied server. + responses_written: + type: integer + description: The total number of responses written to the cache. + bytes_written: + type: integer + description: The total number of bytes written to the cache. + example: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + NginxHTTPUpstreamMap: + title: HTTP Upstreams + description: | + Status information of all HTTP + dynamically configurable + groups. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPUpstream' + example: + trac-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + keepalive: 0 + zombies: 0 + zone: trac-backend + hg-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + keepalive: 0 
+ zombies: 0 + zone: hg-backend + NginxHTTPUpstream: + title: HTTP Upstream + type: object + properties: + peers: + $ref: '#/definitions/NginxHTTPUpstreamPeerMap' + keepalive: + type: integer + description: The current number of idle + keepalive + connections. + zombies: + type: integer + description: The current number of servers removed + from the group but still processing active client requests. + zone: + type: string + description: The name of the shared memory + zone + that keeps the group’s configuration and run-time state. + queue: + type: object + description: > + For the requests + queue, + the following data are provided: + properties: + size: + type: integer + description: The current number of requests in the queue. + max_size: + type: integer + description: The maximum number of requests that can be in the queue + at the same time. + overflows: + type: integer + description: The total number of requests rejected due to the queue overflow. + example: + upstream_backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 20 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 20 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + keepalive: 0 + zombies: 0 + zone: upstream_backend + 
NginxHTTPUpstreamPeerMap: + title: HTTP Upstream Servers + description: | + An array of HTTP + upstream servers. + type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamPeer' + NginxHTTPUpstreamPeer: + title: HTTP Upstream Server + type: object + properties: + id: + type: integer + description: The ID of the server. + readOnly: true + server: + type: string + description: An address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. + name: + type: string + description: The name of the server specified in the + server + directive. + readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. + state: + type: string + enum: + - up + - draining + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “draining”, “down”, + “unavail”, “checking”, + and “unhealthy”. + active: + type: integer + description: The current number of active connections. + readOnly: true + max_conns: + type: integer + description: The + max_conns + limit for the server. + requests: + type: integer + description: The total number of client requests forwarded to this server. + readOnly: true + responses: + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. 
+ readOnly: true + total: + type: integer + description: The total number of responses obtained from this server. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. + readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client requests + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. + readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. + fails: + type: integer + description: The number of failed health checks. + unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). + last_passed: + type: boolean + description: Boolean indicating if the last health check request was successful + and passed + tests. + downtime: + type: integer + readOnly: true + description: Total time the server was in the “unavail”, + “checking”, and “unhealthy” states. + downstart: + type: string + format: date-time + readOnly: true + description: The time (in milliseconds since Epoch) when the server became + “unavail”, “checking”, + or “unhealthy”. + selected: + type: string + format: date-time + readOnly: true + description: The time (in milliseconds since Epoch) + when the server was last selected to process a request. + header_time: + type: integer + readOnly: true + description: The average time to get the + response header + from the server. + response_time: + type: integer + readOnly: true + description: The average time to get the + full response + from the server. 
+ NginxHTTPUpstreamConfServerMap: + title: HTTP Upstream Servers + description: An array of HTTP upstream servers for dynamic configuration. + type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:8088 + weight: 1 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: false + down: false + - id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPUpstreamConfServer: + title: HTTP Upstream Server + description: | + Dynamically configurable parameters of an HTTP upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the HTTP upstream server. + The ID is assigned automatically and cannot be changed. + readOnly: true + server: + type: string + description: Same as the + address + parameter of the HTTP upstream server. + When adding a server, it is possible to specify it as a domain name. + In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “http” block. + See also the + resolve + parameter of the HTTP upstream server. + service: + type: string + description: Same as the + service + parameter of the HTTP upstream server. + This parameter cannot be changed. + readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the HTTP upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the HTTP upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the HTTP upstream server. + fail_timeout: + type: integer + description: Same as the + fail_timeout + parameter of the HTTP upstream server. 
+ slow_start: + type: integer + description: Same as the + slow_start + parameter of the HTTP upstream server. + route: + type: string + description: Same as the + route + parameter of the HTTP upstream server. + backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the HTTP upstream server. + drain: + type: boolean + description: Same as the + drain + parameter of the HTTP upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. + readOnly: true + example: + id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPKeyvalZonesMap: + title: HTTP Keyval Shared Memory Zones + description: | + Contents of all HTTP keyval shared memory zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPKeyvalZone' + example: + keyval_zone: + key1: value1 + key2: value2 + key3: value3 + one: + arg1: value1 + arg2: value2 + arg3: value3 + NginxHTTPKeyvalZone: + title: HTTP Keyval Shared Memory Zone + description: | + Contents of an HTTP keyval shared memory zone. + type: object + example: + key1: value1 + key2: value2 + key3: value3 + NginxStreamServerZonesMap: + title: Stream Server Zones + description: | + Status information for all stream + status zones. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxStreamServerZone' + example: + mysql-frontend: + processing: 2 + connections: 270925 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 270925 + discarded: 0 + received: 28988975 + sent: 3879346317 + dns: + processing: 1 + connections: 155569 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 155569 + discarded: 0 + received: 4200363 + sent: 20489184 + NginxStreamServerZone: + title: Stream Server Zone + type: object + properties: + processing: + type: integer + description: The number of client connections + that are currently being processed. + connections: + type: integer + description: The total number of connections accepted from clients. + sessions: + type: object + description: The total number of completed sessions, + and the number of sessions completed with status codes + “2xx”, “4xx”, or “5xx”. + properties: + 2xx: + type: integer + description: The total number of sessions completed with + status codes + “2xx”. + 4xx: + type: integer + description: The total number of sessions completed with + status codes + “4xx”. + 5xx: + type: integer + description: The total number of sessions completed with + status codes + “5xx”. + total: + type: integer + description: The total number of completed client sessions. + discarded: + type: integer + description: The total number of + connections completed without creating a session. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + dns: + processing: 1 + connections: 155569 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 155569 + discarded: 0 + received: 4200363 + sent: 20489184 + NginxStreamUpstreamMap: + title: Stream Upstreams + description: Status information of stream upstream server groups. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxStreamUpstream' + example: + mysql_backends: + peers: + - id: 0 + server: 10.0.0.1:12345 + name: 10.0.0.1:12345 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 30 + connecions: 1231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:12346 + name: 10.0.0.1:12346 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 30 + connections: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + zombies: 0 + zone: mysql_backends + dns: + peers: + - id: 0 + server: 10.0.0.1:12347 + name: 10.0.0.1:12347 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 30 + connections: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:12348 + name: 10.0.0.1:12348 + backup: true + weight: 1 + state: unhealthy + active: 0 + connections: 0 + max_conns: 30 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + zombies: 0 + zone: dns + NginxStreamUpstream: + title: Stream Upstream + type: object + properties: + peers: + $ref: '#/definitions/NginxStreamUpstreamPeerMap' + zombies: + type: integer + description: The current number of servers removed from the group + but still processing active client connections. 
+ zone: + type: string + description: The name of the shared memory + zone + that keeps the group’s configuration and run-time state. + example: + dns: + peers: + - id: 0 + server: 10.0.0.1:12347 + name: 10.0.0.1:12347 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 50 + connections: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:12348 + name: 10.0.0.1:12348 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 50 + connections: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + zombies: 0 + zone: dns + NginxStreamUpstreamPeerMap: + title: Stream Upstream Servers + description: Array of stream upstream servers. + type: array + items: + $ref: '#/definitions/NginxStreamUpstreamPeer' + NginxStreamUpstreamPeer: + title: Stream Upstream Server + type: object + properties: + id: + type: integer + description: The ID of the server. + readOnly: true + server: + type: string + description: An + address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. + name: + type: string + format: hostname + description: The name of the server specified in the + server + directive. + readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. + state: + type: string + readOnly: true + enum: + - up + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “down”, “unavail”, + “checking”, or “unhealthy”. 
+ active: + type: integer + description: The current number of connections. + readOnly: true + max_conns: + type: integer + description: The + max_conns + limit for the server. + connections: + type: integer + description: The total number of client connections forwarded to this server. + readOnly: true + connect_time: + type: integer + description: The average time to connect to the upstream server. + readOnly: true + first_byte_time: + type: integer + description: The average time to receive the first byte of data. + readOnly: true + response_time: + type: integer + description: The average time to receive the last byte of data. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. + readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client connections + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. + readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. + readOnly: true + fails: + type: integer + description: The number of failed health checks. + readOnly: true + unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). + readOnly: true + last_passed: + type: boolean + description: Boolean indicating whether the last health check request + was successful and passed + tests. + readOnly: true + downtime: + type: integer + description: Total time the server was in the + “unavail”, “checking”, + and “unhealthy” states. 
+ readOnly: true + downstart: + type: string + format: date-time + description: The time (in milliseconds since Epoch) when the server became + “unavail”, “checking”, + or “unhealthy”. + readOnly: true + selected: + type: string + format: date-time + description: The time (in milliseconds since Epoch) + when the server was last selected to process a connection. + readOnly: true + NginxStreamUpstreamConfServerMap: + title: Stream Upstream Servers + description: | + An array of stream upstream servers for dynamic configuration. + type: array + items: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + - id: 1 + server: 10.0.0.1:12349 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxStreamUpstreamConfServer: + title: Stream Upstream Server + description: | + Dynamically configurable parameters of a stream upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the stream upstream server. + The ID is assigned automatically and cannot be changed. + readOnly: true + server: + type: string + description: Same as the + address + parameter of the stream upstream server. + When adding a server, it is possible to specify it as a domain name. + In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “stream” block. + See also the + resolve + parameter of the stream upstream server. + service: + type: string + description: Same as the + service + parameter of the stream upstream server. + This parameter cannot be changed. 
+ readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the stream upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the stream upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the stream upstream server. + fail_timeout: + type: integer + description: Same as the + fail_timeout + parameter of the stream upstream server. + slow_start: + type: integer + description: Same as the + slow_start + parameter of the stream upstream server. + backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the stream upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. + readOnly: true + example: + id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxStreamKeyvalZonesMap: + title: Stream Keyval Shared Memory Zones + description: | + Contents of all stream keyval shared memory zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxStreamKeyvalZone' + example: + keyval_zone: + key1: value1 + key2: value2 + key3: value3 + one: + arg1: value1 + arg2: value2 + arg3: value3 + NginxStreamKeyvalZone: + title: Stream Keyval Shared Memory Zone + description: | + Contents of a stream keyval shared memory zone. + type: object + example: + key1: value1 + key2: value2 + key3: value3 + NginxError: + title: Error + description: | + nginx error object. + type: object + properties: + path: + type: string + description: API path. 
+ method: + type: string + description: HTTP method. + error: + type: object + properties: + status: + type: string + description: HTTP error code. + text: + type: string + description: Error description. + code: + type: string + description: Internal nginx error code. + request_id: + type: string + description: The ID of the request, equals the value of the + $request_id + variable. + href: + type: string + description: Link to reference documentation. diff --git a/content/nginx/admin-guide/yaml/v3/nginx_api.yaml b/content/nginx/admin-guide/yaml/v3/nginx_api.yaml new file mode 100644 index 000000000..f7b80ff21 --- /dev/null +++ b/content/nginx/admin-guide/yaml/v3/nginx_api.yaml @@ -0,0 +1,3254 @@ +swagger: '2.0' +info: + version: '3.0' + title: NGINX Plus REST API + description: NGINX Plus REST + [API](https://nginx.org/en/docs/http/ngx_http_api_module.html) + provides access to NGINX Plus status information, + on-the-fly configuration of upstream servers and + key-value pairs management for + [http](https://nginx.org/en/docs/http/ngx_http_keyval_module.html) and + [stream](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html). +basePath: /api/3 +tags: + - name: General Info + - name: Processes + - name: Connections + - name: SSL + - name: Slabs + - name: HTTP + - name: HTTP Requests + - name: HTTP Caches + - name: HTTP Server Zones + - name: HTTP Upstreams + - name: HTTP Keyvals + - name: Stream + - name: Stream Server Zones + - name: Stream Upstreams + - name: Stream Keyvals + - name: Stream Zone Sync + - name: Method GET + - name: Method POST + - name: Method PATCH + - name: Method DELETE +schemes: + - http +paths: + /: + get: + tags: + - General Info + - Method GET + summary: Return list of root endpoints + description: Returns a list of root endpoints. 
+ operationId: getAPIEndpoints + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + /nginx: + get: + tags: + - General Info + - Method GET + summary: Return status of nginx running instance + description: Returns nginx version, build name, address, + number of configuration reloads, IDs of master and worker processes. + operationId: getNginx + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of nginx running instance will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxObject' + /processes: + get: + tags: + - Processes + - Method GET + summary: Return nginx processes status + description: Returns the number of abnormally terminated + and respawned child processes. + operationId: getProcesses + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxProcesses' + delete: + tags: + - Processes + - Method DELETE + summary: Reset nginx processes statistics + description: Resets counters of abnormally terminated and respawned + child processes. + operationId: deleteProcesses + responses: + '204': + description: Success + /connections: + get: + tags: + - Connections + - Method GET + summary: Return client connections statistics + description: Returns statistics of client connections. + operationId: getConnections + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxConnections' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the connections statistics will be output. + delete: + tags: + - Connections + - Method DELETE + summary: Reset client connections statistics + description: Resets statistics of accepted and dropped + client connections. 
+ operationId: deleteConnections + responses: + '204': + description: Success + /ssl: + get: + tags: + - SSL + - Method GET + summary: Return SSL statistics + description: Returns SSL statistics. + operationId: getSsl + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSSLObject' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of SSL statistics will be output. + delete: + tags: + - SSL + - Method DELETE + summary: Reset SSL statistics + description: Resets counters of SSL handshakes and session reuses. + operationId: deleteSslStat + responses: + '204': + description: Success + /slabs/: + get: + tags: + - Slabs + - Method GET + summary: Return status of all slabs + description: Returns status of slabs + for each shared memory zone with slab allocator. + operationId: getSlabs + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of slab zones will be output. + If the “fields” value is empty, + then only zone names are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZoneMap' + '/slabs/{slabZoneName}': + parameters: + - name: slabZoneName + in: path + description: The name of the shared memory zone with slab allocator. + required: true + type: string + get: + tags: + - Slabs + - Method GET + summary: Return status of a slab + description: Returns status of slabs for a particular shared memory zone + with slab allocator. + operationId: getSlabZone + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the slab zone will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZone' + '404': + description: Slab not found (*SlabNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Slabs + - Method DELETE + summary: Reset slab statistics + description: Resets the “reqs” and “fails” + metrics for each memory slot. + operationId: deleteSlabZoneStats + responses: + '204': + description: Success + '404': + description: Slab not found (*SlabNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/: + get: + tags: + - HTTP + - Method GET + summary: Return list of HTTP-related endpoints + description: Returns a list of first level HTTP endpoints. + operationId: getHttp + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + /http/requests: + get: + tags: + - HTTP Requests + - Method GET + summary: Return HTTP requests statistics + description: Returns status of client HTTP requests. + operationId: getHttpRequests + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of client HTTP requests statistics + will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPRequests' + delete: + tags: + - HTTP Requests + - Method DELETE + summary: Reset HTTP requests statistics + description: Resets the number of total client HTTP requests. 
+ operationId: deleteHttpRequests + responses: + '204': + description: Success + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/server_zones/: + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of all HTTP server zones + description: Returns status information for each HTTP + [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone). + operationId: getHttpServerZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZonesMap' + '/http/server_zones/{httpServerZoneName}': + parameters: + - name: httpServerZoneName + in: path + description: The name of an HTTP server zone. + type: string + required: true + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of an HTTP server zone + description: Returns status of a particular HTTP server zone. + operationId: getHttpServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZone' + '404': + description: Server zone not found (*ServerZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Server Zones + - Method DELETE + summary: Reset statistics for an HTTP server zone + description: Resets statistics of accepted and discarded requests, responses, + received and sent bytes in a particular HTTP server zone. 
+ operationId: deleteHttpServerZoneStat + responses: + '204': + description: Success + '404': + description: Server zone not found (*ServerZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/caches/: + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of all caches + description: Returns status of each cache configured by + [proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) + and other “*_cache_path” directives. + operationId: getHttpCaches + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of cache zones will be output. + If the “fields” value is empty, + then only names of cache zones are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCachesMap' + '/http/caches/{httpCacheZoneName}': + parameters: + - name: httpCacheZoneName + in: path + description: The name of the cache zone. + type: string + required: true + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of a cache + description: Returns status of a particular cache. + operationId: getHttpCacheZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the cache zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCache' + '404': + description: Cache not found (*CacheNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Caches + - Method DELETE + summary: Reset cache statistics + description: Resets statistics of cache hits/misses in a particular cache zone. 
+ operationId: deleteHttpCacheZoneStat + responses: + '204': + description: Success + '404': + description: Cache not found (*CacheNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/upstreams/: + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of all HTTP upstream server groups + description: Returns status of each HTTP upstream server group + and its servers. + operationId: getHttpUpstreams + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of upstream server groups will be output. + If the “fields” value is empty, + only names of upstreams are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamMap' + '/http/upstreams/{httpUpstreamName}/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an HTTP upstream server group. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of an HTTP upstream server group + description: Returns status of a particular HTTP upstream server group + and its servers. + operationId: getHttpUpstreamName + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Upstreams + - Method DELETE + summary: Reset statistics of an HTTP upstream server group + description: Resets the statistics for each upstream server + in an upstream server group and queue statistics. + operationId: deleteHttpUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/servers/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an upstream server group. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of all servers in an HTTP upstream server group + description: Returns configuration of each server + in a particular HTTP upstream server group. + operationId: getHttpUpstreamServers + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServerMap' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - HTTP Upstreams + - Method POST + summary: Add a server to an HTTP upstream server group + description: Adds a new server to an HTTP upstream server group. 
+ Server parameters are specified in the JSON format. + operationId: postHttpUpstreamServer + produces: + - application/json + parameters: + - in: body + name: postHttpUpstreamServer + description: Address of a new server and other optional parameters + in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. + required: true + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + responses: + '201': + description: Created + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + missing “*server*” argument (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + service upstream “*host*” may not have port (*UpstreamBadAddress*), + service upstream “*host*” requires domain name (*UpstreamBadAddress*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + route is too long (*UpstreamBadRoute*), + “*service*” is empty (*UpstreamBadService*), + no resolver defined to resolve (*UpstreamConfNoResolver*), + upstream “**name**” has no backup (*UpstreamNoBackup*), + upstream “**name**” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/servers/{httpUpstreamServerId}': + parameters: + - name: 
httpUpstreamName + in: path + description: The name of the upstream server group. + required: true + type: string + - name: httpUpstreamServerId + in: path + description: The ID of the server. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of a server in an HTTP upstream server group + description: Returns configuration of a particular server + in the HTTP upstream server group. + operationId: getHttpUpstreamPeer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - HTTP Upstreams + - Method PATCH + summary: Modify a server in an HTTP upstream server group + description: Modifies settings of a particular server + in an HTTP upstream server group. + Server parameters are specified in the JSON format. + operationId: patchHttpUpstreamPeer + produces: + - application/json + parameters: + - in: body + name: patchHttpUpstreamServer + description: Server parameters, specified in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + invalid “*server*” argument (*UpstreamBadAddress*), + invalid server ID (*UpstreamBadServerId*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + route is too long (*UpstreamBadRoute*), + “*service*” is empty (*UpstreamBadService*), + server “**ID**” address is immutable (*UpstreamServerImmutable*), + server “*ID*” weight is immutable (*UpstreamServerWeightImmutable*), + upstream “*name*” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Upstreams + - Method DELETE + summary: Remove a server from an HTTP upstream server group + description: Removes a server from an HTTP upstream server group. 
+ operationId: deleteHttpUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/keyvals/: + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from all HTTP keyval zones + description: Returns key-value pairs for each HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + operationId: getHttpKeyvalZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: If the “fields” value is empty, + then only HTTP keyval zone names are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZonesMap' + '/http/keyvals/{httpKeyvalZoneName}': + parameters: + - name: httpKeyvalZoneName + in: path + description: The name of an HTTP keyval shared memory zone. + required: true + type: string + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from an HTTP keyval zone + description: Returns key-value pairs stored in a particular HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + operationId: getHttpKeyvalZoneKeysValues + produces: + - application/json + parameters: + - name: key + in: query + type: string + description: Get a particular key-value pair from the HTTP keyval zone. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZone' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - HTTP Keyvals + - Method POST + summary: Add a key-value pair to the HTTP keyval zone + description: Adds a new key-value pair to the HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + Several key-value pairs can be entered + if the HTTP keyval shared memory zone is empty. + operationId: postHttpKeyvalZoneData + produces: + - application/json + parameters: + - in: body + name: Key-value + description: A key-value pair is specified in the JSON format. + Several key-value pairs can be entered + if the HTTP keyval shared memory zone is empty. + required: true + schema: + $ref: '#/definitions/NginxHTTPKeyvalZone' + responses: + '201': + description: Created + '400': + description: | + Key required (*KeyvalFormatError*), + only one key can be added (*KeyvalFormatError*), + nested object or list (*KeyvalFormatError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Keyval not found (*KeyvalNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '409': + description: Key already exists (*KeyvalKeyExists*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - HTTP Keyvals + - Method PATCH + summary: Modify a key-value or delete a key + description: Changes the value of the selected key in the key-value pair + or deletes a key by setting the key value to null. 
+      operationId: patchHttpKeyvalZoneKeyValue
+      produces:
+        - application/json
+      parameters:
+        - in: body
+          name: httpKeyvalZoneKeyValue
+          description: A new value for the key is specified in the JSON format.
+          required: true
+          schema:
+            $ref: '#/definitions/NginxHTTPKeyvalZone'
+      responses:
+        '204':
+          description: Success
+        '400':
+          description: |
+            Key required (*KeyvalFormatError*),
+            only one key can be updated (*KeyvalFormatError*),
+            nested object or list (*KeyvalFormatError*)
+          schema:
+            $ref: '#/definitions/NginxError'
+        '404':
+          description: |
+            Keyval not found (*KeyvalNotFound*),
+            keyval key not found (*KeyvalKeyNotFound*)
+          schema:
+            $ref: '#/definitions/NginxError'
+        '405':
+          description: Method disabled (*MethodDisabled*)
+          schema:
+            $ref: '#/definitions/NginxError'
+        '415':
+          description: JSON error (*JsonError*)
+          schema:
+            $ref: '#/definitions/NginxError'
+    delete:
+      tags:
+        - HTTP Keyvals
+        - Method DELETE
+      summary: Empty the HTTP keyval zone
+      description: Deletes all key-value pairs from the HTTP keyval shared memory
+        [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone).
+      operationId: deleteHttpKeyvalZoneData
+      responses:
+        '204':
+          description: Success
+        '404':
+          description: Keyval not found (*KeyvalNotFound*)
+          schema:
+            $ref: '#/definitions/NginxError'
+        '405':
+          description: Method disabled (*MethodDisabled*)
+          schema:
+            $ref: '#/definitions/NginxError'
+  /stream/:
+    get:
+      tags:
+        - Stream
+        - Method GET
+      summary: Return list of stream-related endpoints
+      description: Returns a list of first level stream endpoints.
+ operationId: getStream + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + /stream/server_zones/: + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of all stream server zones + description: Returns status information for each stream + [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone). + operationId: getStreamServerZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZonesMap' + '404': + description: > + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) + not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '/stream/server_zones/{streamServerZoneName}': + parameters: + - name: streamServerZoneName + in: path + description: The name of a stream server zone. + type: string + required: true + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of a stream server zone + description: Returns status of a particular stream server zone. + operationId: getStreamServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZone' + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Server Zones + - Method DELETE + summary: Reset statistics for a stream server zone + description: Resets statistics of accepted and discarded connections, sessions, + received and sent bytes in a particular stream server zone. + operationId: deleteStreamServerZoneStat + responses: + '204': + description: Success + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/upstreams/: + get: + tags: + - Stream Upstreams + - Method GET + summary: Return status of all stream upstream server groups + description: Returns status of each stream upstream server group + and its servers. + operationId: getStreamUpstreams + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of upstream server groups will be output. + If the “fields” value is empty, + only names of upstreams are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamMap' + '404': + description: > + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) + not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/': + parameters: + - name: streamUpstreamName + in: path + description: The name of a stream upstream server group. 
+ required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return status of a stream upstream server group + description: Returns status of a particular stream upstream server group + and its servers. + operationId: getStreamUpstream + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Upstreams + - Method DELETE + summary: Reset statistics of a stream upstream server group + description: Resets the statistics for each upstream server + in an upstream server group. + operationId: deleteStreamUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/servers/': + parameters: + - name: streamUpstreamName + in: path + description: The name of an upstream server group. 
+ required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of all servers in a stream upstream server group + description: Returns configuration of each server + in a particular stream upstream server group. + operationId: getStreamUpstreamServers + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - Stream Upstreams + - Method POST + summary: Add a server to a stream upstream server group + description: Adds a new server to a stream upstream server group. + Server parameters are specified in the JSON format. + operationId: postStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: postStreamUpstreamServer + description: Address of a new server and other optional parameters + in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + responses: + '201': + description: Created + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + missing “*server*” argument (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + no port in server “*host*” (*UpstreamBadAddress*), + service upstream “*host*” may not have port (*UpstreamBadAddress*), + service upstream “*host*” requires domain name (*UpstreamBadAddress*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + “*service*” is empty (*UpstreamBadService*), + no resolver defined to resolve (*UpstreamConfNoResolver*), + upstream “**name**” has no backup (*UpstreamNoBackup*), + upstream “**name**” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/servers/{streamUpstreamServerId}': + parameters: + - name: streamUpstreamName + in: path + description: The name of the upstream server group. + required: true + type: string + - name: streamUpstreamServerId + in: path + description: The ID of the server. 
+ required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of a server in a stream upstream server group + description: Returns configuration of a particular server + in the stream upstream server group. + operationId: getStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - Stream Upstreams + - Method PATCH + summary: Modify a server in a stream upstream server group + description: Modifies settings of a particular server + in a stream upstream server group. + Server parameters are specified in the JSON format. + operationId: patchStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: patchStreamUpstreamServer + description: Server parameters, specified in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + invalid “*server*” argument (*UpstreamBadAddress*), + no port in server “*host*” (*UpstreamBadAddress*), + invalid server ID (*UpstreamBadServerId*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + “*service*” is empty (*UpstreamBadService*), + server “**ID**” address is immutable (*UpstreamServerImmutable*), + server “**ID**” weight is immutable (*UpstreamServerWeightImmutable*), + upstream “*name*” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Upstreams + - Method DELETE + summary: Remove a server from a stream upstream server group + description: Removes a server from a stream server group. 
+ operationId: deleteStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/keyvals/: + get: + tags: + - Stream Keyvals + - Method GET + summary: Return key-value pairs from all stream keyval zones + description: Returns key-value pairs for each stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + operationId: getStreamKeyvalZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: If the “fields” value is empty, + then only stream keyval zone names are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamKeyvalZonesMap' + '404': + description: | + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '/stream/keyvals/{streamKeyvalZoneName}': + parameters: + - name: streamKeyvalZoneName + in: path + description: The name of a stream keyval shared memory zone. 
+ required: true + type: string + get: + tags: + - Stream Keyvals + - Method GET + summary: Return key-value pairs from a stream keyval zone + description: Returns key-value pairs stored in a particular stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + operationId: getStreamKeyvalZoneKeysValues + produces: + - application/json + parameters: + - name: key + in: query + type: string + description: Get a particular key-value pair from the stream keyval zone. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamKeyvalZone' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - Stream Keyvals + - Method POST + summary: Add a key-value pair to the stream keyval zone + description: Adds a new key-value pair to the stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + Several key-value pairs can be entered + if the stream keyval shared memory zone is empty. + operationId: postStreamKeyvalZoneData + produces: + - application/json + parameters: + - in: body + name: Key-value + description: A key-value pair is specified in the JSON format. + Several key-value pairs can be entered + if the stream keyval shared memory zone is empty. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamKeyvalZone' + responses: + '201': + description: Created + '400': + description: | + Key required (*KeyvalFormatError*), + only one key can be added (*KeyvalFormatError*), + nested object or list (*KeyvalFormatError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '409': + description: Key already exists (*KeyvalKeyExists*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - Stream Keyvals + - Method PATCH + summary: Modify a key-value or delete a key + description: Changes the value of the selected key in the key-value pair + or deletes a key by setting the key value to null. + operationId: patchStreamKeyvalZoneKeyValue + produces: + - application/json + parameters: + - in: body + name: streamKeyvalZoneKeyValue + description: A new value for the key is specified in the JSON format. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamKeyvalZone' + responses: + '204': + description: Success + '400': + description: | + Key required (*KeyvalFormatError*), + only one key can be updated (*KeyvalFormatError*), + nested object or list (*KeyvalFormatError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Keyvals + - Method DELETE + summary: Empty the stream keyval zone + description: Deletes all key-value pairs from the stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + operationId: deleteStreamKeyvalZoneData + responses: + '204': + description: Success + '404': + description: | + Keyval not found (*KeyvalNotFound*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/zone_sync/: + get: + tags: + - Stream Zone Sync + - Method GET + summary: Return sync status of a node + description: Returns synchronization status of a cluster node. 
+ operationId: getStreamZoneSync + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamZoneSync' + '404': + description: | + Zone sync not configured (*ZoneSyncNotConfigured*), + [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) not configured (*StreamNotConfigured*) + schema: + $ref: '#/definitions/NginxError' +### +###DEFINITIONS +### +definitions: + ArrayOfStrings: + title: Array + description: | + An array of strings. + type: array + items: + type: string + NginxObject: + title: nginx + description: | + General information about nginx: + type: object + properties: + version: + type: string + description: Version of nginx. + build: + type: string + description: Name of nginx build. + address: + type: string + description: The address of the server that accepted status request. + generation: + type: integer + description: The total number of configuration + reloads. + load_timestamp: + type: string + format: date-time + description: Time of the last reload of configuration, + in the ISO 8601 format with millisecond resolution. + timestamp: + type: string + format: date-time + description: Current time + in the ISO 8601 format with millisecond resolution. + pid: + type: integer + description: The ID of the worker process that handled status request. + ppid: + type: integer + description: The ID of the master process that started the + worker process. + example: + nginx: + version: 1.15.2 + build: nginx-plus-r16 + address: 206.251.255.64 + generation: 6 + load_timestamp: 2018-10-08T09:05:48.776Z + timestamp: 2018-10-08T15:23:17.056Z + pid: 32212 + ppid: 32210 + NginxProcesses: + title: Processes + type: object + properties: + respawned: + type: integer + description: The total number of abnormally terminated + and respawned child processes. 
+ example: + respawned: 0 + NginxConnections: + title: Connections + description: | + The number of accepted, dropped, active, and idle connections. + type: object + properties: + accepted: + type: integer + description: The total number of accepted client connections. + dropped: + type: integer + description: The total number of dropped client connections. + active: + type: integer + description: The current number of active client connections. + idle: + type: integer + description: The current number of idle client connections. + example: + accepted: 4968119 + dropped: 0 + active: 5 + idle: 117 + NginxSSLObject: + title: SSL + type: object + properties: + handshakes: + type: integer + description: The total number of successful SSL handshakes. + handshakes_failed: + type: integer + description: The total number of failed SSL handshakes. + session_reuses: + type: integer + description: The total number of session reuses during SSL handshake. + example: + handshakes: 79572 + handshakes_failed: 21025 + session_reuses: 15762 + NginxSlabZoneMap: + title: Slab Zones + description: | + Status zones that use slab allocator. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxSlabZone' + example: + http_cache: + pages: + used: 2 + free: 2452 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 1 + free: 126 + reqs: 1 + fails: 0 + 64: + used: 2 + free: 62 + reqs: 2 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + dns-backends: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZone: + title: Shared memory zone with slab allocator + description: | + type: object + properties: + pages: + type: object + description: The number of free and used memory pages. + properties: + used: + type: integer + description: The current number of used memory pages. + free: + type: integer + description: The current number of free memory pages. + slots: + type: object + title: Memory Slots + description: Status data for memory slots (8, 16, 32, 64, 128, etc.) + additionalProperties: + $ref: '#/definitions/NginxSlabZoneSlot' + example: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZoneSlot: + title: Memory Slot + type: object + properties: + used: + type: integer + description: The current number of used memory slots. + free: + type: integer + description: The current number of free memory slots. + reqs: + type: integer + description: The total number of attempts + to allocate memory of specified size. 
+ fails: + type: integer + description: The number of unsuccessful attempts + to allocate memory of specified size. + NginxHTTPRequests: + title: HTTP Requests + type: object + properties: + total: + type: integer + description: The total number of client requests. + current: + type: integer + description: The current number of client requests. + example: + total: 10624511 + current: 4 + NginxHTTPServerZonesMap: + title: HTTP Server Zones + description: | + Status data for all HTTP + status zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPServerZone' + example: + site1: + processing: 2 + requests: 736395 + responses: + 1xx: 0 + 2xx: 727290 + 3xx: 4614 + 4xx: 934 + 5xx: 1535 + total: 734373 + discarded: 2020 + received: 180157219 + sent: 20183175459 + site2: + processing: 1 + requests: 185307 + responses: + 1xx: 0 + 2xx: 112674 + 3xx: 45383 + 4xx: 2504 + 5xx: 4419 + total: 164980 + discarded: 20326 + received: 51575327 + sent: 2983241510 + NginxHTTPServerZone: + title: HTTP Server Zone + type: object + properties: + processing: + type: integer + description: The number of client requests + that are currently being processed. + requests: + type: integer + description: The total number of client requests received from clients. + responses: + description: The total number of responses sent to clients and the + number of responses with status codes + “1xx”, “2xx”, “3xx”, + “4xx”, and “5xx”. + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. 
+ readOnly: true + total: + type: integer + description: The total number of responses sent to clients. + readOnly: true + discarded: + type: integer + description: The total number of + requests completed without sending a response. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + processing: 1 + requests: 706690 + responses: + 1xx: 0 + 2xx: 699482 + 3xx: 4522 + 4xx: 907 + 5xx: 266 + total: 705177 + discarded: 1513 + received: 172711587 + sent: 19415530115 + NginxHTTPCachesMap: + title: HTTP Caches + description: | + Status information of all HTTP caches configured by + proxy_cache_path + and other “*_cache_path” directives. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPCache' + example: + http-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + frontend-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + NginxHTTPCache: + title: HTTP Cache + type: object + properties: + size: + type: integer + description: The current size of the cache. 
+ max_size: + type: integer + description: The limit on the maximum size of the cache + specified in the configuration. + cold: + type: boolean + description: A boolean value indicating whether the “cache loader” process + is still loading data from disk into the cache. + hit: + type: object + properties: + responses: + type: integer + description: The total number of + valid + responses read from the cache. + bytes: + type: integer + description: The total number of bytes read from the cache. + stale: + type: object + properties: + responses: + type: integer + description: The total number of expired responses read from the cache (see + proxy_cache_use_stale + and other “*_cache_use_stale” directives). + bytes: + type: integer + description: The total number of bytes read from the cache. + updating: + type: object + properties: + responses: + type: integer + description: The total number of expired responses read from the cache + while responses were being updated (see + proxy_cache_use_stale + and other “*_cache_use_stale” directives). + bytes: + type: integer + description: The total number of bytes read from the cache. + revalidated: + type: object + properties: + responses: + type: integer + description: The total number of expired and revalidated responses + read from the cache (see + proxy_cache_revalidate + and other “*_cache_revalidate” directives). + bytes: + type: integer + description: The total number of bytes read from the cache. + miss: + type: object + properties: + responses: + type: integer + description: The total number of responses not found in the cache. + bytes: + type: integer + description: The total number of bytes read from the proxied server. + responses_written: + type: integer + description: The total number of responses written to the cache. + bytes_written: + type: integer + description: The total number of bytes written to the cache. 
+ expired: + type: object + properties: + responses: + type: integer + description: The total number of expired responses not taken from the cache. + bytes: + type: integer + description: The total number of bytes read from the proxied server. + responses_written: + type: integer + description: The total number of responses written to the cache. + bytes_written: + type: integer + description: The total number of bytes written to the cache. + bypass: + type: object + properties: + responses: + type: integer + description: The total number of responses not looked up in the cache due to the + proxy_cache_bypass + and other “*_cache_bypass” directives. + bytes: + type: integer + description: The total number of bytes read from the proxied server. + responses_written: + type: integer + description: The total number of responses written to the cache. + bytes_written: + type: integer + description: The total number of bytes written to the cache. + example: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + NginxHTTPUpstreamMap: + title: HTTP Upstreams + description: | + Status information of all HTTP + dynamically configurable + groups. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPUpstream' + example: + trac-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + keepalive: 0 + zombies: 0 + zone: trac-backend + hg-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + keepalive: 0 
+ zombies: 0 + zone: hg-backend + NginxHTTPUpstream: + title: HTTP Upstream + type: object + properties: + peers: + $ref: '#/definitions/NginxHTTPUpstreamPeerMap' + keepalive: + type: integer + description: The current number of idle + keepalive + connections. + zombies: + type: integer + description: The current number of servers removed + from the group but still processing active client requests. + zone: + type: string + description: The name of the shared memory + zone + that keeps the group’s configuration and run-time state. + queue: + type: object + description: > + For the requests + queue, + the following data are provided: + properties: + size: + type: integer + description: The current number of requests in the queue. + max_size: + type: integer + description: The maximum number of requests that can be in the queue + at the same time. + overflows: + type: integer + description: The total number of requests rejected due to the queue overflow. + example: + upstream_backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 20 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 20 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + keepalive: 0 + zombies: 0 + zone: upstream_backend + 
NginxHTTPUpstreamPeerMap: + title: HTTP Upstream Servers + description: | + An array of HTTP + upstream servers. + type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamPeer' + NginxHTTPUpstreamPeer: + title: HTTP Upstream Server + type: object + properties: + id: + type: integer + description: The ID of the server. + readOnly: true + server: + type: string + description: An address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. + name: + type: string + description: The name of the server specified in the + server + directive. + readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. + state: + type: string + enum: + - up + - draining + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “draining”, “down”, + “unavail”, “checking”, + and “unhealthy”. + active: + type: integer + description: The current number of active connections. + readOnly: true + max_conns: + type: integer + description: The + max_conns + limit for the server. + requests: + type: integer + description: The total number of client requests forwarded to this server. + readOnly: true + responses: + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. 
+ readOnly: true + total: + type: integer + description: The total number of responses obtained from this server. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. + readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client requests + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. + readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. + fails: + type: integer + description: The number of failed health checks. + unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). + last_passed: + type: boolean + description: Boolean indicating if the last health check request was successful + and passed + tests. + downtime: + type: integer + readOnly: true + description: Total time the server was in the “unavail”, + “checking”, and “unhealthy” states. + downstart: + type: string + format: date-time + readOnly: true + description: The time when the server became + “unavail”, “checking”, + or “unhealthy”, + in the ISO 8601 format with millisecond resolution. + selected: + type: string + format: date-time + readOnly: true + description: The time when the server was last selected to process a request, + in the ISO 8601 format with millisecond resolution. + header_time: + type: integer + readOnly: true + description: The average time to get the + response header + from the server. + response_time: + type: integer + readOnly: true + description: The average time to get the + full response + from the server. 
+ NginxHTTPUpstreamConfServerMap: + title: HTTP Upstream Servers + description: An array of HTTP upstream servers for dynamic configuration. + type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:8088 + weight: 1 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: false + down: false + - id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPUpstreamConfServer: + title: HTTP Upstream Server + description: | + Dynamically configurable parameters of an HTTP upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the HTTP upstream server. + The ID is assigned automatically and cannot be changed. + readOnly: true + server: + type: string + description: Same as the + address + parameter of the HTTP upstream server. + When adding a server, it is possible to specify it as a domain name. + In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “http” block. + See also the + resolve + parameter of the HTTP upstream server. + service: + type: string + description: Same as the + service + parameter of the HTTP upstream server. + This parameter cannot be changed. + readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the HTTP upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the HTTP upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the HTTP upstream server. + fail_timeout: + type: string + description: Same as the + fail_timeout + parameter of the HTTP upstream server. 
+ slow_start: + type: string + description: Same as the + slow_start + parameter of the HTTP upstream server. + route: + type: string + description: Same as the + route + parameter of the HTTP upstream server. + backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the HTTP upstream server. + drain: + type: boolean + description: Same as the + drain + parameter of the HTTP upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. + readOnly: true + example: + id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPKeyvalZonesMap: + title: HTTP Keyval Shared Memory Zones + description: | + Contents of all HTTP keyval shared memory zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPKeyvalZone' + example: + keyval_zone: + key1: value1 + key2: value2 + key3: value3 + one: + arg1: value1 + arg2: value2 + arg3: value3 + NginxHTTPKeyvalZone: + title: HTTP Keyval Shared Memory Zone + description: | + Contents of an HTTP keyval shared memory zone. + type: object + example: + key1: value1 + key2: value2 + key3: value3 + NginxStreamServerZonesMap: + title: Stream Server Zones + description: | + Status information for all stream + status zones. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxStreamServerZone' + example: + mysql-frontend: + processing: 2 + connections: 270925 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 270925 + discarded: 0 + received: 28988975 + sent: 3879346317 + dns: + processing: 1 + connections: 155569 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 155569 + discarded: 0 + received: 4200363 + sent: 20489184 + NginxStreamServerZone: + title: Stream Server Zone + type: object + properties: + processing: + type: integer + description: The number of client connections + that are currently being processed. + connections: + type: integer + description: The total number of connections accepted from clients. + sessions: + type: object + description: The total number of completed sessions, + and the number of sessions completed with status codes + “2xx”, “4xx”, or “5xx”. + properties: + 2xx: + type: integer + description: The total number of sessions completed with + status codes + “2xx”. + 4xx: + type: integer + description: The total number of sessions completed with + status codes + “4xx”. + 5xx: + type: integer + description: The total number of sessions completed with + status codes + “5xx”. + total: + type: integer + description: The total number of completed client sessions. + discarded: + type: integer + description: The total number of + connections completed without creating a session. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + dns: + processing: 1 + connections: 155569 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 155569 + discarded: 0 + received: 4200363 + sent: 20489184 + NginxStreamUpstreamMap: + title: Stream Upstreams + description: Status information of stream upstream server groups. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxStreamUpstream' + example: + mysql_backends: + peers: + - id: 0 + server: 10.0.0.1:12345 + name: 10.0.0.1:12345 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 30 + connections: 1231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:12346 + name: 10.0.0.1:12346 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 30 + connections: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + zombies: 0 + zone: mysql_backends + dns: + peers: + - id: 0 + server: 10.0.0.1:12347 + name: 10.0.0.1:12347 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 30 + connections: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:12348 + name: 10.0.0.1:12348 + backup: true + weight: 1 + state: unhealthy + active: 0 + connections: 0 + max_conns: 30 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + zombies: 0 + zone: dns + NginxStreamUpstream: + title: Stream Upstream + type: object + properties: + peers: + $ref: '#/definitions/NginxStreamUpstreamPeerMap' + zombies: + type: integer + description: The current number of servers removed from the group + but still processing active client connections. 
+ zone: + type: string + description: The name of the shared memory + zone + that keeps the group’s configuration and run-time state. + example: + dns: + peers: + - id: 0 + server: 10.0.0.1:12347 + name: 10.0.0.1:12347 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 50 + connections: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:12348 + name: 10.0.0.1:12348 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 50 + connections: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + zombies: 0 + zone: dns + NginxStreamUpstreamPeerMap: + title: Stream Upstream Servers + description: Array of stream upstream servers. + type: array + items: + $ref: '#/definitions/NginxStreamUpstreamPeer' + NginxStreamUpstreamPeer: + title: Stream Upstream Server + type: object + properties: + id: + type: integer + description: The ID of the server. + readOnly: true + server: + type: string + description: An + address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. + name: + type: string + format: hostname + description: The name of the server specified in the + server + directive. + readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. + state: + type: string + readOnly: true + enum: + - up + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “down”, “unavail”, + “checking”, or “unhealthy”. 
+ active: + type: integer + description: The current number of connections. + readOnly: true + max_conns: + type: integer + description: The + max_conns + limit for the server. + connections: + type: integer + description: The total number of client connections forwarded to this server. + readOnly: true + connect_time: + type: integer + description: The average time to connect to the upstream server. + readOnly: true + first_byte_time: + type: integer + description: The average time to receive the first byte of data. + readOnly: true + response_time: + type: integer + description: The average time to receive the last byte of data. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. + readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client connections + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. + readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. + readOnly: true + fails: + type: integer + description: The number of failed health checks. + readOnly: true + unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). + readOnly: true + last_passed: + type: boolean + description: Boolean indicating whether the last health check request + was successful and passed + tests. + readOnly: true + downtime: + type: integer + description: Total time the server was in the + “unavail”, “checking”, + and “unhealthy” states. 
+ readOnly: true + downstart: + type: string + format: date-time + description: The time when the server became + “unavail”, “checking”, + or “unhealthy”, + in the ISO 8601 format with millisecond resolution. + readOnly: true + selected: + type: string + format: date-time + description: The time when the server was last selected + to process a connection, + in the ISO 8601 format with millisecond resolution. + readOnly: true + NginxStreamUpstreamConfServerMap: + title: Stream Upstream Servers + description: | + An array of stream upstream servers for dynamic configuration. + type: array + items: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + - id: 1 + server: 10.0.0.1:12349 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxStreamUpstreamConfServer: + title: Stream Upstream Server + description: | + Dynamically configurable parameters of a stream upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the stream upstream server. + The ID is assigned automatically and cannot be changed. + readOnly: true + server: + type: string + description: Same as the + address + parameter of the stream upstream server. + When adding a server, it is possible to specify it as a domain name. + In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “stream” block. + See also the + resolve + parameter of the stream upstream server. + service: + type: string + description: Same as the + service + parameter of the stream upstream server. + This parameter cannot be changed. 
+ readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the stream upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the stream upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the stream upstream server. + fail_timeout: + type: string + description: Same as the + fail_timeout + parameter of the stream upstream server. + slow_start: + type: string + description: Same as the + slow_start + parameter of the stream upstream server. + backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the stream upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. + readOnly: true + example: + id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxStreamKeyvalZonesMap: + title: Stream Keyval Shared Memory Zones + description: | + Contents of all stream keyval shared memory zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxStreamKeyvalZone' + example: + keyval_zone: + key1: value1 + key2: value2 + key3: value3 + one: + arg1: value1 + arg2: value2 + arg3: value3 + NginxStreamKeyvalZone: + title: Stream Keyval Shared Memory Zone + description: | + Contents of a stream keyval shared memory zone. 
+ type: object + example: + key1: value1 + key2: value2 + key3: value3 + NginxStreamZoneSync: + title: Stream Zone Sync Node + type: object + properties: + zones: + type: object + title: Zone Sync Zones + description: Synchronization information per each shared memory zone. + additionalProperties: + $ref: '#/definitions/NginxStreamZoneSyncZone' + status: + type: object + description: Synchronization information per node in a cluster. + properties: + bytes_in: + type: integer + description: The number of bytes received by this node. + msgs_in: + type: integer + description: The number of messages received by this node. + msgs_out: + type: integer + description: The number of messages sent by this node. + bytes_out: + type: integer + description: The number of bytes sent by this node. + nodes_online: + type: integer + description: The number of peers this node is connected to. + example: + zones: + zone1: + records_pending: 2061 + records_total: 260575 + zone2: + records_pending: 0 + records_total: 14749 + status: + bytes_in: 1364923761 + msgs_in: 337236 + msgs_out: 346717 + bytes_out: 1402765472 + nodes_online: 15 + NginxStreamZoneSyncZone: + title: Sync Zone + description: Synchronization status of a shared memory zone. + type: object + properties: + records_pending: + type: integer + description: The number of records that need to be sent to the cluster. + records_total: + type: integer + description: The total number of records stored in the shared memory zone. + NginxError: + title: Error + description: | + nginx error object. + type: object + properties: + path: + type: string + description: API path. + method: + type: string + description: HTTP method. + error: + type: object + properties: + status: + type: integer + description: HTTP error code. + text: + type: string + description: Error description. + code: + type: string + description: Internal nginx error code. 
+ request_id: + type: string + description: The ID of the request, equals the value of the + $request_id + variable. + href: + type: string + description: Link to reference documentation. diff --git a/content/nginx/admin-guide/yaml/v4/nginx_api.yaml b/content/nginx/admin-guide/yaml/v4/nginx_api.yaml new file mode 100644 index 000000000..583013a66 --- /dev/null +++ b/content/nginx/admin-guide/yaml/v4/nginx_api.yaml @@ -0,0 +1,3228 @@ +swagger: '2.0' +info: + version: '4.0' + title: NGINX Plus REST API + description: NGINX Plus REST + [API](https://nginx.org/en/docs/http/ngx_http_api_module.html) + provides access to NGINX Plus status information, + on-the-fly configuration of upstream servers and + key-value pairs management for + [http](https://nginx.org/en/docs/http/ngx_http_keyval_module.html) and + [stream](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html). +basePath: /api/4 +tags: + - name: General Info + - name: Processes + - name: Connections + - name: SSL + - name: Slabs + - name: HTTP + - name: HTTP Requests + - name: HTTP Caches + - name: HTTP Server Zones + - name: HTTP Upstreams + - name: HTTP Keyvals + - name: Stream + - name: Stream Server Zones + - name: Stream Upstreams + - name: Stream Keyvals + - name: Stream Zone Sync + - name: Method GET + - name: Method POST + - name: Method PATCH + - name: Method DELETE +schemes: + - http +paths: + /: + get: + tags: + - General Info + - Method GET + summary: Return list of root endpoints + description: Returns a list of root endpoints. + operationId: getAPIEndpoints + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + /nginx: + get: + tags: + - General Info + - Method GET + summary: Return status of nginx running instance + description: Returns nginx version, build name, address, + number of configuration reloads, IDs of master and worker processes. 
+ operationId: getNginx + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of nginx running instance will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxObject' + /processes: + get: + tags: + - Processes + - Method GET + summary: Return nginx processes status + description: Returns the number of abnormally terminated + and respawned child processes. + operationId: getProcesses + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxProcesses' + delete: + tags: + - Processes + - Method DELETE + summary: Reset nginx processes statistics + description: Resets counters of abnormally terminated and respawned + child processes. + operationId: deleteProcesses + responses: + '204': + description: Success + /connections: + get: + tags: + - Connections + - Method GET + summary: Return client connections statistics + description: Returns statistics of client connections. + operationId: getConnections + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxConnections' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the connections statistics will be output. + delete: + tags: + - Connections + - Method DELETE + summary: Reset client connections statistics + description: Resets statistics of accepted and dropped + client connections. + operationId: deleteConnections + responses: + '204': + description: Success + /ssl: + get: + tags: + - SSL + - Method GET + summary: Return SSL statistics + description: Returns SSL statistics. 
+ operationId: getSsl + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSSLObject' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of SSL statistics will be output. + delete: + tags: + - SSL + - Method DELETE + summary: Reset SSL statistics + description: Resets counters of SSL handshakes and session reuses. + operationId: deleteSslStat + responses: + '204': + description: Success + /slabs/: + get: + tags: + - Slabs + - Method GET + summary: Return status of all slabs + description: Returns status of slabs + for each shared memory zone with slab allocator. + operationId: getSlabs + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of slab zones will be output. + If the “fields” value is empty, + then only zone names are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZoneMap' + '/slabs/{slabZoneName}': + parameters: + - name: slabZoneName + in: path + description: The name of the shared memory zone with slab allocator. + required: true + type: string + get: + tags: + - Slabs + - Method GET + summary: Return status of a slab + description: Returns status of slabs for a particular shared memory zone + with slab allocator. + operationId: getSlabZone + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the slab zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZone' + '404': + description: Slab not found (*SlabNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Slabs + - Method DELETE + summary: Reset slab statistics + description: Resets the “reqs” and “fails” + metrics for each memory slot. 
+ operationId: deleteSlabZoneStats + responses: + '204': + description: Success + '404': + description: Slab not found (*SlabNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/: + get: + tags: + - HTTP + - Method GET + summary: Return list of HTTP-related endpoints + description: Returns a list of first level HTTP endpoints. + operationId: getHttp + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + /http/requests: + get: + tags: + - HTTP Requests + - Method GET + summary: Return HTTP requests statistics + description: Returns status of client HTTP requests. + operationId: getHttpRequests + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of client HTTP requests statistics + will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPRequests' + delete: + tags: + - HTTP Requests + - Method DELETE + summary: Reset HTTP requests statistics + description: Resets the number of total client HTTP requests. + operationId: deleteHttpRequests + responses: + '204': + description: Success + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/server_zones/: + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of all HTTP server zones + description: Returns status information for each HTTP + [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone). + operationId: getHttpServerZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names are output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZonesMap' + '/http/server_zones/{httpServerZoneName}': + parameters: + - name: httpServerZoneName + in: path + description: The name of an HTTP server zone. + type: string + required: true + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of an HTTP server zone + description: Returns status of a particular HTTP server zone. + operationId: getHttpServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZone' + '404': + description: Server zone not found (*ServerZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Server Zones + - Method DELETE + summary: Reset statistics for an HTTP server zone + description: Resets statistics of accepted and discarded requests, responses, + received and sent bytes in a particular HTTP server zone. + operationId: deleteHttpServerZoneStat + responses: + '204': + description: Success + '404': + description: Server zone not found (*ServerZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/caches/: + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of all caches + description: Returns status of each cache configured by + [proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) + and other “*_cache_path” directives. + operationId: getHttpCaches + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of cache zones will be output. + If the “fields” value is empty, + then only names of cache zones are output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCachesMap' + '/http/caches/{httpCacheZoneName}': + parameters: + - name: httpCacheZoneName + in: path + description: The name of the cache zone. + type: string + required: true + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of a cache + description: Returns status of a particular cache. + operationId: getHttpCacheZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the cache zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCache' + '404': + description: Cache not found (*CacheNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Caches + - Method DELETE + summary: Reset cache statistics + description: Resets statistics of cache hits/misses in a particular cache zone. + operationId: deleteHttpCacheZoneStat + responses: + '204': + description: Success + '404': + description: Cache not found (*CacheNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/upstreams/: + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of all HTTP upstream server groups + description: Returns status of each HTTP upstream server group + and its servers. + operationId: getHttpUpstreams + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of upstream server groups will be output. + If the “fields” value is empty, + only names of upstreams are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamMap' + '/http/upstreams/{httpUpstreamName}/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an HTTP upstream server group. 
+ required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of an HTTP upstream server group + description: Returns status of a particular HTTP upstream server group + and its servers. + operationId: getHttpUpstreamName + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Upstreams + - Method DELETE + summary: Reset statistics of an HTTP upstream server group + description: Resets the statistics for each upstream server + in an upstream server group and queue statistics. + operationId: deleteHttpUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/servers/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an upstream server group. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of all servers in an HTTP upstream server group + description: Returns configuration of each server + in a particular HTTP upstream server group. 
+ operationId: getHttpUpstreamServers + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServerMap' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - HTTP Upstreams + - Method POST + summary: Add a server to an HTTP upstream server group + description: Adds a new server to an HTTP upstream server group. + Server parameters are specified in the JSON format. + operationId: postHttpUpstreamServer + produces: + - application/json + parameters: + - in: body + name: postHttpUpstreamServer + description: Address of a new server and other optional parameters + in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. + required: true + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + responses: + '201': + description: Created + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + missing “*server*” argument (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + service upstream “*host*” may not have port (*UpstreamBadAddress*), + service upstream “*host*” requires domain name (*UpstreamBadAddress*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + route is too long (*UpstreamBadRoute*), + “*service*” is empty (*UpstreamBadService*), + no resolver defined to resolve (*UpstreamConfNoResolver*), + 
upstream “**name**” has no backup (*UpstreamNoBackup*), + upstream “**name**” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/servers/{httpUpstreamServerId}': + parameters: + - name: httpUpstreamName + in: path + description: The name of the upstream server group. + required: true + type: string + - name: httpUpstreamServerId + in: path + description: The ID of the server. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of a server in an HTTP upstream server group + description: Returns configuration of a particular server + in the HTTP upstream server group. + operationId: getHttpUpstreamPeer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - HTTP Upstreams + - Method PATCH + summary: Modify a server in an HTTP upstream server group + description: Modifies settings of a particular server + in an HTTP upstream server group. + Server parameters are specified in the JSON format. + operationId: patchHttpUpstreamPeer + produces: + - application/json + parameters: + - in: body + name: patchHttpUpstreamServer + description: Server parameters, specified in the JSON format. 
+ The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. + required: true + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + invalid “*server*” argument (*UpstreamBadAddress*), + invalid server ID (*UpstreamBadServerId*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + route is too long (*UpstreamBadRoute*), + “*service*” is empty (*UpstreamBadService*), + server “**ID**” address is immutable (*UpstreamServerImmutable*), + server “*ID*” weight is immutable (*UpstreamServerWeightImmutable*), + upstream “*name*” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Upstreams + - Method DELETE + summary: Remove a server from an HTTP upstream server group + description: Removes a server from an HTTP upstream server group. 
+ operationId: deleteHttpUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/keyvals/: + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from all HTTP keyval zones + description: Returns key-value pairs for each HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + operationId: getHttpKeyvalZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: If the “fields” value is empty, + then only HTTP keyval zone names are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZonesMap' + '/http/keyvals/{httpKeyvalZoneName}': + parameters: + - name: httpKeyvalZoneName + in: path + description: The name of an HTTP keyval shared memory zone. + required: true + type: string + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from an HTTP keyval zone + description: Returns key-value pairs stored in a particular HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + operationId: getHttpKeyvalZoneKeysValues + produces: + - application/json + parameters: + - name: key + in: query + type: string + description: Get a particular key-value pair from the HTTP keyval zone. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZone' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - HTTP Keyvals + - Method POST + summary: Add a key-value pair to the HTTP keyval zone + description: Adds a new key-value pair to the HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + Several key-value pairs can be entered + if the HTTP keyval shared memory zone is empty. + operationId: postHttpKeyvalZoneData + produces: + - application/json + parameters: + - in: body + name: Key-value + description: A key-value pair is specified in the JSON format. + Several key-value pairs can be entered + if the HTTP keyval shared memory zone is empty. + required: true + schema: + $ref: '#/definitions/NginxHTTPKeyvalZone' + responses: + '201': + description: Created + '400': + description: | + Key required (*KeyvalFormatError*), + only one key can be added (*KeyvalFormatError*), + nested object or list (*KeyvalFormatError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Keyval not found (*KeyvalNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '409': + description: Key already exists (*KeyvalKeyExists*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - HTTP Keyvals + - Method PATCH + summary: Modify a key-value or delete a key + description: Changes the value of the selected key in the key-value pair + or deletes a key by setting the key value to null. + If + synchronization + of keyval zones in a cluster is enabled, + deletes a key only on a target cluster node. 
+ operationId: patchHttpKeyvalZoneKeyValue + produces: + - application/json + parameters: + - in: body + name: httpKeyvalZoneKeyValue + description: A new value for the key is specified in the JSON format. + required: true + schema: + $ref: '#/definitions/NginxHTTPKeyvalZone' + responses: + '204': + description: Success + '400': + description: | + Key required (*KeyvalFormatError*), + only one key can be updated (*KeyvalFormatError*), + nested object or list (*KeyvalFormatError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Keyvals + - Method DELETE + summary: Empty the HTTP keyval zone + description: Deletes all key-value pairs from the HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + If + synchronization + of keyval zones in a cluster is enabled, + empties the keyval zone only on a target cluster node. + operationId: deleteHttpKeyvalZoneData + responses: + '204': + description: Success + '404': + description: Keyval not found (*KeyvalNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/: + get: + tags: + - Stream + summary: Return list of stream-related endpoints + description: Returns a list of first level stream endpoints. 
+ operationId: getStream + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + /stream/server_zones/: + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of all stream server zones + description: Returns status information for each stream + [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone). + operationId: getStreamServerZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZonesMap' + '/stream/server_zones/{streamServerZoneName}': + parameters: + - name: streamServerZoneName + in: path + description: The name of a stream server zone. + type: string + required: true + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of a stream server zone + description: Returns status of a particular stream server zone. + operationId: getStreamServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZone' + '404': + description: | + Server zone not found (*ServerZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Server Zones + - Method DELETE + summary: Reset statistics for a stream server zone + description: Resets statistics of accepted and discarded connections, sessions, + received and sent bytes in a particular stream server zone. 
+ operationId: deleteStreamServerZoneStat + responses: + '204': + description: Success + '404': + description: | + Server zone not found (*ServerZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/upstreams/: + get: + tags: + - Stream Upstreams + - Method GET + summary: Return status of all stream upstream server groups + description: Returns status of each stream upstream server group + and its servers. + operationId: getStreamUpstreams + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of upstream server groups will be output. + If the “fields” value is empty, + only names of upstreams are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamMap' + '/stream/upstreams/{streamUpstreamName}/': + parameters: + - name: streamUpstreamName + in: path + description: The name of a stream upstream server group. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return status of a stream upstream server group + description: Returns status of a particular stream upstream server group + and its servers. + operationId: getStreamUpstream + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Upstreams + - Method DELETE + summary: Reset statistics of a stream upstream server group + description: Resets the statistics for each upstream server + in an upstream server group. + operationId: deleteStreamUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/servers/': + parameters: + - name: streamUpstreamName + in: path + description: The name of an upstream server group. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of all servers in a stream upstream server group + description: Returns configuration of each server + in a particular stream upstream server group. + operationId: getStreamUpstreamServers + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - Stream Upstreams + - Method POST + summary: Add a server to a stream upstream server group + description: Adds a new server to a stream upstream server group. 
+ Server parameters are specified in the JSON format. + operationId: postStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: postStreamUpstreamServer + description: Address of a new server and other optional parameters + in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. + required: true + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + responses: + '201': + description: Created + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + missing “*server*” argument (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + no port in server “*host*” (*UpstreamBadAddress*), + service upstream “*host*” may not have port (*UpstreamBadAddress*), + service upstream “*host*” requires domain name (*UpstreamBadAddress*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + “*service*” is empty (*UpstreamBadService*), + no resolver defined to resolve (*UpstreamConfNoResolver*), + upstream “**name**” has no backup (*UpstreamNoBackup*), + upstream “**name**” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/servers/{streamUpstreamServerId}': + 
parameters: + - name: streamUpstreamName + in: path + description: The name of the upstream server group. + required: true + type: string + - name: streamUpstreamServerId + in: path + description: The ID of the server. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of a server in a stream upstream server group + description: Returns configuration of a particular server + in the stream upstream server group. + operationId: getStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - Stream Upstreams + - Method PATCH + summary: Modify a server in a stream upstream server group + description: Modifies settings of a particular server + in a stream upstream server group. + Server parameters are specified in the JSON format. + operationId: patchStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: patchStreamUpstreamServer + description: Server parameters, specified in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + invalid “*server*” argument (*UpstreamBadAddress*), + no port in server “*host*” (*UpstreamBadAddress*), + invalid server ID (*UpstreamBadServerId*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + “*service*” is empty (*UpstreamBadService*), + server “**ID**” address is immutable (*UpstreamServerImmutable*), + server “**ID**” weight is immutable (*UpstreamServerWeightImmutable*), + upstream “*name*” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Upstreams + - Method DELETE + summary: Remove a server from a stream upstream server group + description: Removes a server from a stream server group. 
+ operationId: deleteStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/keyvals/: + get: + tags: + - Stream Keyvals + - Method GET + summary: Return key-value pairs from all stream keyval zones + description: Returns key-value pairs for each stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + operationId: getStreamKeyvalZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: If the “fields” value is empty, + then only stream keyval zone names are output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamKeyvalZonesMap' + '/stream/keyvals/{streamKeyvalZoneName}': + parameters: + - name: streamKeyvalZoneName + in: path + description: The name of a stream keyval shared memory zone. + required: true + type: string + get: + tags: + - Stream Keyvals + - Method GET + summary: Return key-value pairs from a stream keyval zone + description: Returns key-value pairs stored in a particular stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). 
+ operationId: getStreamKeyvalZoneKeysValues + produces: + - application/json + parameters: + - name: key + in: query + type: string + description: Get a particular key-value pair from the stream keyval zone. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamKeyvalZone' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - Stream Keyvals + - Method POST + summary: Add a key-value pair to the stream keyval zone + description: Adds a new key-value pair to the stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + Several key-value pairs can be entered + if the stream keyval shared memory zone is empty. + operationId: postStreamKeyvalZoneData + produces: + - application/json + parameters: + - in: body + name: Key-value + description: A key-value pair is specified in the JSON format. + Several key-value pairs can be entered + if the stream keyval shared memory zone is empty. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamKeyvalZone' + responses: + '201': + description: Created + '400': + description: | + Key required (*KeyvalFormatError*), + only one key can be added (*KeyvalFormatError*), + nested object or list (*KeyvalFormatError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Keyval not found (*KeyvalNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '409': + description: Key already exists (*KeyvalKeyExists*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - Stream Keyvals + - Method PATCH + summary: Modify a key-value or delete a key + description: Changes the value of the selected key in the key-value pair + or deletes a key by setting the key value to null. + If + synchronization + of keyval zones in a cluster is enabled, + deletes a key only on a target cluster node. + operationId: patchStreamKeyvalZoneKeyValue + produces: + - application/json + parameters: + - in: body + name: streamKeyvalZoneKeyValue + description: A new value for the key is specified in the JSON format. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamKeyvalZone' + responses: + '204': + description: Success + '400': + description: | + Key required (*KeyvalFormatError*), + only one key can be updated (*KeyvalFormatError*), + nested object or list (*KeyvalFormatError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Keyvals + - Method DELETE + summary: Empty the stream keyval zone + description: Deletes all key-value pairs from the stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + If + synchronization + of keyval zones in a cluster is enabled, + empties the keyval zone only on a target cluster node. + operationId: deleteStreamKeyvalZoneData + responses: + '204': + description: Success + '404': + description: | + Keyval not found (*KeyvalNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/zone_sync/: + get: + tags: + - Stream Zone Sync + - Method GET + summary: Return sync status of a node + description: Returns synchronization status of a cluster node. + operationId: getStreamZoneSync + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamZoneSync' +### +###DEFINITIONS +### +definitions: + ArrayOfStrings: + title: Array + description: | + An array of strings. 
+ type: array + items: + type: string + NginxObject: + title: nginx + description: | + General information about nginx: + type: object + properties: + version: + type: string + description: Version of nginx. + build: + type: string + description: Name of nginx build. + address: + type: string + description: The address of the server that accepted status request. + generation: + type: integer + description: The total number of configuration + reloads. + load_timestamp: + type: string + format: date-time + description: Time of the last reload of configuration, + in the ISO 8601 format with millisecond resolution. + timestamp: + type: string + format: date-time + description: Current time + in the ISO 8601 format with millisecond resolution. + pid: + type: integer + description: The ID of the worker process that handled status request. + ppid: + type: integer + description: The ID of the master process that started the + worker process. + example: + nginx: + version: 1.15.2 + build: nginx-plus-r16 + address: 206.251.255.64 + generation: 6 + load_timestamp: 2018-10-08T09:05:48.776Z + timestamp: 2018-10-08T15:23:17.056Z + pid: 32212 + ppid: 32210 + NginxProcesses: + title: Processes + type: object + properties: + respawned: + type: integer + description: The total number of abnormally terminated + and respawned child processes. + example: + respawned: 0 + NginxConnections: + title: Connections + description: | + The number of accepted, dropped, active, and idle connections. + type: object + properties: + accepted: + type: integer + description: The total number of accepted client connections. + dropped: + type: integer + description: The total number of dropped client connections. + active: + type: integer + description: The current number of active client connections. + idle: + type: integer + description: The current number of idle client connections. 
+ example: + accepted: 4968119 + dropped: 0 + active: 5 + idle: 117 + NginxSSLObject: + title: SSL + type: object + properties: + handshakes: + type: integer + description: The total number of successful SSL handshakes. + handshakes_failed: + type: integer + description: The total number of failed SSL handshakes. + session_reuses: + type: integer + description: The total number of session reuses during SSL handshake. + example: + handshakes: 79572 + handshakes_failed: 21025 + session_reuses: 15762 + NginxSlabZoneMap: + title: Slab Zones + description: | + Status zones that use slab allocator. + type: object + additionalProperties: + $ref: '#/definitions/NginxSlabZone' + example: + http_cache: + pages: + used: 2 + free: 2452 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 1 + free: 126 + reqs: 1 + fails: 0 + 64: + used: 2 + free: 62 + reqs: 2 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + dns-backends: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZone: + title: Shared memory zone with slab allocator + description: | + type: object + properties: + pages: + type: object + description: The number of free and used memory pages. + properties: + used: + type: integer + description: The current number of used memory pages. + free: + type: integer + description: The current number of free memory pages. + slots: + type: object + title: Memory Slots + description: Status data for memory slots (8, 16, 32, 64, 128, etc.) 
+ additionalProperties: + $ref: '#/definitions/NginxSlabZoneSlot' + example: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZoneSlot: + title: Memory Slot + type: object + properties: + used: + type: integer + description: The current number of used memory slots. + free: + type: integer + description: The current number of free memory slots. + reqs: + type: integer + description: The total number of attempts + to allocate memory of specified size. + fails: + type: integer + description: The number of unsuccessful attempts + to allocate memory of specified size. + NginxHTTPRequests: + title: HTTP Requests + type: object + properties: + total: + type: integer + description: The total number of client requests. + current: + type: integer + description: The current number of client requests. + example: + total: 10624511 + current: 4 + NginxHTTPServerZonesMap: + title: HTTP Server Zones + description: | + Status data for all HTTP + status zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPServerZone' + example: + site1: + processing: 2 + requests: 736395 + responses: + 1xx: 0 + 2xx: 727290 + 3xx: 4614 + 4xx: 934 + 5xx: 1535 + total: 734373 + discarded: 2020 + received: 180157219 + sent: 20183175459 + site2: + processing: 1 + requests: 185307 + responses: + 1xx: 0 + 2xx: 112674 + 3xx: 45383 + 4xx: 2504 + 5xx: 4419 + total: 164980 + discarded: 20326 + received: 51575327 + sent: 2983241510 + NginxHTTPServerZone: + title: HTTP Server Zone + type: object + properties: + processing: + type: integer + description: The number of client requests + that are currently being processed. 
+ requests: + type: integer + description: The total number of client requests received from clients. + responses: + description: The total number of responses sent to clients and the + number of responses with status codes + “1xx”, “2xx”, “3xx”, + “4xx”, and “5xx”. + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. + readOnly: true + total: + type: integer + description: The total number of responses sent to clients. + readOnly: true + discarded: + type: integer + description: The total number of + requests completed without sending a response. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + processing: 1 + requests: 706690 + responses: + 1xx: 0 + 2xx: 699482 + 3xx: 4522 + 4xx: 907 + 5xx: 266 + total: 705177 + discarded: 1513 + received: 172711587 + sent: 19415530115 + NginxHTTPCachesMap: + title: HTTP Caches + description: | + Status information of all HTTP caches configured by + proxy_cache_path + and other “*_cache_path” directives. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPCache' + example: + http-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + frontend-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + NginxHTTPCache: + title: HTTP Cache + type: object + properties: + size: + type: integer + description: The current size of the cache. + max_size: + type: integer + description: The limit on the maximum size of the cache + specified in the configuration. + cold: + type: boolean + description: A boolean value indicating whether the “cache loader” process + is still loading data from disk into the cache. + hit: + type: object + properties: + responses: + type: integer + description: The total number of + valid + responses read from the cache. + bytes: + type: integer + description: The total number of bytes read from the cache. + stale: + type: object + properties: + responses: + type: integer + description: The total number of expired responses read from the cache (see + proxy_cache_use_stale + and other “*_cache_use_stale” directives). + bytes: + type: integer + description: The total number of bytes read from the cache. 
+      updating:
+        type: object
+        properties:
+          responses:
+            type: integer
+            description: The total number of expired responses read from the cache
+              while responses were being updated (see
+              proxy_cache_use_stale
+              and other “*_cache_use_stale” directives).
+          bytes:
+            type: integer
+            description: The total number of bytes read from the cache.
+      revalidated:
+        type: object
+        properties:
+          responses:
+            type: integer
+            description: The total number of expired and revalidated responses
+              read from the cache (see
+              proxy_cache_revalidate
+              and other “*_cache_revalidate” directives).
+          bytes:
+            type: integer
+            description: The total number of bytes read from the cache.
+      miss:
+        type: object
+        properties:
+          responses:
+            type: integer
+            description: The total number of responses not found in the cache.
+          bytes:
+            type: integer
+            description: The total number of bytes read from the proxied server.
+          responses_written:
+            type: integer
+            description: The total number of responses written to the cache.
+          bytes_written:
+            type: integer
+            description: The total number of bytes written to the cache.
+      expired:
+        type: object
+        properties:
+          responses:
+            type: integer
+            description: The total number of expired responses not taken from the cache.
+          bytes:
+            type: integer
+            description: The total number of bytes read from the proxied server.
+          responses_written:
+            type: integer
+            description: The total number of responses written to the cache.
+          bytes_written:
+            type: integer
+            description: The total number of bytes written to the cache.
+      bypass:
+        type: object
+        properties:
+          responses:
+            type: integer
+            description: The total number of responses not looked up in the cache due to the
+              proxy_cache_bypass
+              and other “*_cache_bypass” directives.
+          bytes:
+            type: integer
+            description: The total number of bytes read from the proxied server.
+          responses_written:
+            type: integer
+            description: The total number of responses written to the cache.
+ bytes_written: + type: integer + description: The total number of bytes written to the cache. + example: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + NginxHTTPUpstreamMap: + title: HTTP Upstreams + description: | + Status information of all HTTP + dynamically configurable + groups. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPUpstream' + example: + trac-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + keepalive: 0 + zombies: 0 + zone: trac-backend + hg-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 
666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + keepalive: 0 + zombies: 0 + zone: hg-backend + NginxHTTPUpstream: + title: HTTP Upstream + type: object + properties: + peers: + $ref: '#/definitions/NginxHTTPUpstreamPeerMap' + keepalive: + type: integer + description: The current number of idle + keepalive + connections. + zombies: + type: integer + description: The current number of servers removed + from the group but still processing active client requests. + zone: + type: string + description: The name of the shared memory + zone + that keeps the group’s configuration and run-time state. + queue: + type: object + description: > + For the requests + queue, + the following data are provided: + properties: + size: + type: integer + description: The current number of requests in the queue. + max_size: + type: integer + description: The maximum number of requests that can be in the queue + at the same time. + overflows: + type: integer + description: The total number of requests rejected due to the queue overflow. 
+ example: + upstream_backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 20 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 20 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + keepalive: 0 + zombies: 0 + zone: upstream_backend + NginxHTTPUpstreamPeerMap: + title: HTTP Upstream Servers + description: | + An array of HTTP + upstream servers. + type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamPeer' + NginxHTTPUpstreamPeer: + title: HTTP Upstream Server + type: object + properties: + id: + type: integer + description: The ID of the server. + readOnly: true + server: + type: string + description: An address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. + name: + type: string + description: The name of the server specified in the + server + directive. + readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. 
+ state: + type: string + enum: + - up + - draining + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “draining”, “down”, + “unavail”, “checking”, + and “unhealthy”. + active: + type: integer + description: The current number of active connections. + readOnly: true + max_conns: + type: integer + description: The + max_conns + limit for the server. + requests: + type: integer + description: The total number of client requests forwarded to this server. + readOnly: true + responses: + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. + readOnly: true + total: + type: integer + description: The total number of responses obtained from this server. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. + readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client requests + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. + readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. 
+ fails: + type: integer + description: The number of failed health checks. + unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). + last_passed: + type: boolean + description: Boolean indicating if the last health check request was successful + and passed + tests. + downtime: + type: integer + readOnly: true + description: Total time the server was in the “unavail”, + “checking”, and “unhealthy” states. + downstart: + type: string + format: date-time + readOnly: true + description: The time when the server became + “unavail”, “checking”, + or “unhealthy”, + in the ISO 8601 format with millisecond resolution. + selected: + type: string + format: date-time + readOnly: true + description: The time when the server was last selected to process a request, + in the ISO 8601 format with millisecond resolution. + header_time: + type: integer + readOnly: true + description: The average time to get the + response header + from the server. + response_time: + type: integer + readOnly: true + description: The average time to get the + full response + from the server. + NginxHTTPUpstreamConfServerMap: + title: HTTP Upstream Servers + description: An array of HTTP upstream servers for dynamic configuration. + type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:8088 + weight: 1 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: false + down: false + - id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPUpstreamConfServer: + title: HTTP Upstream Server + description: | + Dynamically configurable parameters of an HTTP upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the HTTP upstream server. + The ID is assigned automatically and cannot be changed. 
+ readOnly: true + server: + type: string + description: Same as the + address + parameter of the HTTP upstream server. + When adding a server, it is possible to specify it as a domain name. + In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “http” block. + See also the + resolve + parameter of the HTTP upstream server. + service: + type: string + description: Same as the + service + parameter of the HTTP upstream server. + This parameter cannot be changed. + readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the HTTP upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the HTTP upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the HTTP upstream server. + fail_timeout: + type: string + description: Same as the + fail_timeout + parameter of the HTTP upstream server. + slow_start: + type: string + description: Same as the + slow_start + parameter of the HTTP upstream server. + route: + type: string + description: Same as the + route + parameter of the HTTP upstream server. + backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the HTTP upstream server. + drain: + type: boolean + description: Same as the + drain + parameter of the HTTP upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. 
+ readOnly: true + example: + id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPKeyvalZonesMap: + title: HTTP Keyval Shared Memory Zones + description: | + Contents of all HTTP keyval shared memory zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPKeyvalZone' + example: + keyval_zone: + key1: value1 + key2: value2 + key3: value3 + one: + arg1: value1 + arg2: value2 + arg3: value3 + NginxHTTPKeyvalZone: + title: HTTP Keyval Shared Memory Zone + description: | + Contents of an HTTP keyval shared memory zone. + type: object + example: + key1: value1 + key2: value2 + key3: value3 + NginxStreamServerZonesMap: + title: Stream Server Zones + description: | + Status information for all stream + status zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxStreamServerZone' + example: + mysql-frontend: + processing: 2 + connections: 270925 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 270925 + discarded: 0 + received: 28988975 + sent: 3879346317 + dns: + processing: 1 + connections: 155569 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 155569 + discarded: 0 + received: 4200363 + sent: 20489184 + NginxStreamServerZone: + title: Stream Server Zone + type: object + properties: + processing: + type: integer + description: The number of client connections + that are currently being processed. + connections: + type: integer + description: The total number of connections accepted from clients. + sessions: + type: object + description: The total number of completed sessions, + and the number of sessions completed with status codes + “2xx”, “4xx”, or “5xx”. + properties: + 2xx: + type: integer + description: The total number of sessions completed with + status codes + “2xx”. + 4xx: + type: integer + description: The total number of sessions completed with + status codes + “4xx”. 
+          5xx:
+            type: integer
+            description: The total number of sessions completed with
+              status codes
+              “5xx”.
+          total:
+            type: integer
+            description: The total number of completed client sessions.
+      discarded:
+        type: integer
+        description: The total number of
+          connections completed without creating a session.
+      received:
+        type: integer
+        description: The total number of bytes received from clients.
+      sent:
+        type: integer
+        description: The total number of bytes sent to clients.
+    example:
+      dns:
+        processing: 1
+        connections: 155569
+        sessions:
+          2xx: 155564
+          4xx: 0
+          5xx: 0
+          total: 155569
+        discarded: 0
+        received: 4200363
+        sent: 20489184
+  NginxStreamUpstreamMap:
+    title: Stream Upstreams
+    description: Status information of stream upstream server groups.
+    type: object
+    additionalProperties:
+      $ref: '#/definitions/NginxStreamUpstream'
+    example:
+      mysql_backends:
+        peers:
+          - id: 0
+            server: 10.0.0.1:12345
+            name: 10.0.0.1:12345
+            backup: false
+            weight: 5
+            state: up
+            active: 0
+            max_conns: 30
+            connections: 1231
+            sent: 251946292
+            received: 19222475454
+            fails: 0
+            unavail: 0
+            health_checks:
+              checks: 26214
+              fails: 0
+              unhealthy: 0
+              last_passed: true
+              downtime: 0
+              downstart: 2017-07-07T11:09:21.602Z
+              selected: 2017-07-17T15:01:25Z
+          - id: 1
+            server: 10.0.0.1:12346
+            name: 10.0.0.1:12346
+            backup: true
+            weight: 1
+            state: unhealthy
+            active: 0
+            max_conns: 30
+            connections: 0
+            sent: 0
+            received: 0
+            fails: 0
+            unavail: 0
+            health_checks:
+              checks: 26284
+              fails: 26284
+              unhealthy: 1
+              last_passed: false
+              downtime: 262925617
+              downstart: 2017-07-07T11:09:21.602Z
+              selected: 2017-07-17T15:01:25Z
+        zombies: 0
+        zone: mysql_backends
+      dns:
+        peers:
+          - id: 0
+            server: 10.0.0.1:12347
+            name: 10.0.0.1:12347
+            backup: false
+            weight: 5
+            state: up
+            active: 0
+            max_conns: 30
+            connections: 667231
+            sent: 251946292
+            received: 19222475454
+            fails: 0
+            unavail: 0
+            health_checks:
+              checks: 26214
+              fails: 0
+              unhealthy: 0
+              last_passed: true
+              downtime: 0
+ downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:12348 + name: 10.0.0.1:12348 + backup: true + weight: 1 + state: unhealthy + active: 0 + connections: 0 + max_conns: 30 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + zombies: 0 + zone: dns + NginxStreamUpstream: + title: Stream Upstream + type: object + properties: + peers: + $ref: '#/definitions/NginxStreamUpstreamPeerMap' + zombies: + type: integer + description: The current number of servers removed from the group + but still processing active client connections. + zone: + type: string + description: The name of the shared memory + zone + that keeps the group’s configuration and run-time state. + example: + dns: + peers: + - id: 0 + server: 10.0.0.1:12347 + name: 10.0.0.1:12347 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 50 + connections: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + - id: 1 + server: 10.0.0.1:12348 + name: 10.0.0.1:12348 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 50 + connections: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2017-07-07T11:09:21.602Z + selected: 2017-07-17T15:01:25Z + zombies: 0 + zone: dns + NginxStreamUpstreamPeerMap: + title: Stream Upstream Servers + description: Array of stream upstream servers. 
+ type: array + items: + $ref: '#/definitions/NginxStreamUpstreamPeer' + NginxStreamUpstreamPeer: + title: Stream Upstream Server + type: object + properties: + id: + type: integer + description: The ID of the server. + readOnly: true + server: + type: string + description: An + address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. + name: + type: string + format: hostname + description: The name of the server specified in the + server + directive. + readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. + state: + type: string + readOnly: true + enum: + - up + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “down”, “unavail”, + “checking”, or “unhealthy”. + active: + type: integer + description: The current number of connections. + readOnly: true + max_conns: + type: integer + description: The + max_conns + limit for the server. + connections: + type: integer + description: The total number of client connections forwarded to this server. + readOnly: true + connect_time: + type: integer + description: The average time to connect to the upstream server. + readOnly: true + first_byte_time: + type: integer + description: The average time to receive the first byte of data. + readOnly: true + response_time: + type: integer + description: The average time to receive the last byte of data. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. 
+ readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client connections + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. + readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. + readOnly: true + fails: + type: integer + description: The number of failed health checks. + readOnly: true + unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). + readOnly: true + last_passed: + type: boolean + description: Boolean indicating whether the last health check request + was successful and passed + tests. + readOnly: true + downtime: + type: integer + description: Total time the server was in the + “unavail”, “checking”, + and “unhealthy” states. + readOnly: true + downstart: + type: string + format: date-time + description: The time when the server became + “unavail”, “checking”, + or “unhealthy”, + in the ISO 8601 format with millisecond resolution. + readOnly: true + selected: + type: string + format: date-time + description: The time when the server was last selected + to process a connection, + in the ISO 8601 format with millisecond resolution. + readOnly: true + NginxStreamUpstreamConfServerMap: + title: Stream Upstream Servers + description: | + An array of stream upstream servers for dynamic configuration. 
+ type: array + items: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + - id: 1 + server: 10.0.0.1:12349 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxStreamUpstreamConfServer: + title: Stream Upstream Server + description: | + Dynamically configurable parameters of a stream upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the stream upstream server. + The ID is assigned automatically and cannot be changed. + readOnly: true + server: + type: string + description: Same as the + address + parameter of the stream upstream server. + When adding a server, it is possible to specify it as a domain name. + In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “stream” block. + See also the + resolve + parameter of the stream upstream server. + service: + type: string + description: Same as the + service + parameter of the stream upstream server. + This parameter cannot be changed. + readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the stream upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the stream upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the stream upstream server. + fail_timeout: + type: string + description: Same as the + fail_timeout + parameter of the stream upstream server. + slow_start: + type: string + description: Same as the + slow_start + parameter of the stream upstream server. 
+ backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the stream upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. + readOnly: true + example: + id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxStreamKeyvalZonesMap: + title: Stream Keyval Shared Memory Zones + description: | + Contents of all stream keyval shared memory zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxStreamKeyvalZone' + example: + keyval_zone: + key1: value1 + key2: value2 + key3: value3 + one: + arg1: value1 + arg2: value2 + arg3: value3 + NginxStreamKeyvalZone: + title: Stream Keyval Shared Memory Zone + description: | + Contents of a stream keyval shared memory zone. + type: object + example: + key1: value1 + key2: value2 + key3: value3 + NginxStreamZoneSync: + title: Stream Zone Sync Node + type: object + properties: + zones: + type: object + title: Zone Sync Zones + description: Synchronization information per each shared memory zone. + additionalProperties: + $ref: '#/definitions/NginxStreamZoneSyncZone' + status: + type: object + description: Synchronization information per node in a cluster. + properties: + bytes_in: + type: integer + description: The number of bytes received by this node. + msgs_in: + type: integer + description: The number of messages received by this node. + msgs_out: + type: integer + description: The number of messages sent by this node. + bytes_out: + type: integer + description: The number of bytes sent by this node. 
+ nodes_online: + type: integer + description: The number of peers this node is connected to. + example: + zones: + zone1: + records_pending: 2061 + records_total: 260575 + zone2: + records_pending: 0 + records_total: 14749 + status: + bytes_in: 1364923761 + msgs_in: 337236 + msgs_out: 346717 + bytes_out: 1402765472 + nodes_online: 15 + NginxStreamZoneSyncZone: + title: Sync Zone + description: Synchronization status of a shared memory zone. + type: object + properties: + records_pending: + type: integer + description: The number of records that need to be sent to the cluster. + records_total: + type: integer + description: The total number of records stored in the shared memory zone. + NginxError: + title: Error + description: | + nginx error object. + type: object + properties: + error: + type: object + properties: + status: + type: integer + description: HTTP error code. + text: + type: string + description: Error description. + code: + type: string + description: Internal nginx error code. + request_id: + type: string + description: The ID of the request, equals the value of the + $request_id + variable. + href: + type: string + description: Link to reference documentation. diff --git a/content/nginx/admin-guide/yaml/v5/nginx_api.yaml b/content/nginx/admin-guide/yaml/v5/nginx_api.yaml new file mode 100644 index 000000000..090648efe --- /dev/null +++ b/content/nginx/admin-guide/yaml/v5/nginx_api.yaml @@ -0,0 +1,3646 @@ +swagger: '2.0' +info: + version: '5.0' + title: NGINX Plus REST API + description: NGINX Plus REST + [API](https://nginx.org/en/docs/http/ngx_http_api_module.html) + provides access to NGINX Plus status information, + on-the-fly configuration of upstream servers and + key-value pairs management for + [http](https://nginx.org/en/docs/http/ngx_http_keyval_module.html) and + [stream](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html). 
+basePath: /api/5 +tags: + - name: General Info + - name: Processes + - name: Connections + - name: Slabs + - name: Resolvers + - name: SSL + - name: HTTP + - name: HTTP Requests + - name: HTTP Server Zones + - name: HTTP Location Zones + - name: HTTP Caches + - name: HTTP Keyvals + - name: HTTP Upstreams + - name: Stream + - name: Stream Server Zones + - name: Stream Keyvals + - name: Stream Upstreams + - name: Stream Zone Sync + - name: Method GET + - name: Method POST + - name: Method PATCH + - name: Method DELETE +schemes: + - http +paths: + /: + get: + tags: + - General Info + - Method GET + summary: Return list of root endpoints + description: Returns a list of root endpoints. + operationId: getAPIEndpoints + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + /nginx: + get: + tags: + - General Info + - Method GET + summary: Return status of nginx running instance + description: Returns nginx version, build name, address, + number of configuration reloads, IDs of master and worker processes. + operationId: getNginx + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of nginx running instance will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxObject' + /processes: + get: + tags: + - Processes + - Method GET + summary: Return nginx processes status + description: Returns the number of abnormally terminated + and respawned child processes. + operationId: getProcesses + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxProcesses' + delete: + tags: + - Processes + - Method DELETE + summary: Reset nginx processes statistics + description: Resets counters of abnormally terminated and respawned + child processes. 
+ operationId: deleteProcesses + responses: + '204': + description: Success + /connections: + get: + tags: + - Connections + - Method GET + summary: Return client connections statistics + description: Returns statistics of client connections. + operationId: getConnections + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxConnections' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the connections statistics will be output. + delete: + tags: + - Connections + - Method DELETE + summary: Reset client connections statistics + description: Resets statistics of accepted and dropped + client connections. + operationId: deleteConnections + responses: + '204': + description: Success + /slabs/: + get: + tags: + - Slabs + - Method GET + summary: Return status of all slabs + description: Returns status of slabs + for each shared memory zone with slab allocator. + operationId: getSlabs + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of slab zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZoneMap' + '/slabs/{slabZoneName}': + parameters: + - name: slabZoneName + in: path + description: The name of the shared memory zone with slab allocator. + required: true + type: string + get: + tags: + - Slabs + - Method GET + summary: Return status of a slab + description: Returns status of slabs for a particular shared memory zone + with slab allocator. + operationId: getSlabZone + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the slab zone will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZone' + '404': + description: Slab not found (*SlabNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Slabs + - Method DELETE + summary: Reset slab statistics + description: Resets the “reqs” and “fails” + metrics for each memory slot. + operationId: deleteSlabZoneStats + responses: + '204': + description: Success + '404': + description: Slab not found (*SlabNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/: + get: + tags: + - HTTP + - Method GET + summary: Return list of HTTP-related endpoints + description: Returns a list of first level HTTP endpoints. + operationId: getHttp + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + /http/requests: + get: + tags: + - HTTP Requests + - Method GET + summary: Return HTTP requests statistics + description: Returns status of client HTTP requests. + operationId: getHttpRequests + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of client HTTP requests statistics + will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPRequests' + delete: + tags: + - HTTP Requests + - Method DELETE + summary: Reset HTTP requests statistics + description: Resets the number of total client HTTP requests. 
+ operationId: deleteHttpRequests + responses: + '204': + description: Success + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/server_zones/: + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of all HTTP server zones + description: Returns status information for each HTTP + [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone). + operationId: getHttpServerZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZonesMap' + '/http/server_zones/{httpServerZoneName}': + parameters: + - name: httpServerZoneName + in: path + description: The name of an HTTP server zone. + type: string + required: true + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of an HTTP server zone + description: Returns status of a particular HTTP server zone. + operationId: getHttpServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZone' + '404': + description: Server zone not found (*ServerZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Server Zones + - Method DELETE + summary: Reset statistics for an HTTP server zone + description: Resets statistics of accepted and discarded requests, responses, + received and sent bytes in a particular HTTP server zone. 
+ operationId: deleteHttpServerZoneStat + responses: + '204': + description: Success + '404': + description: Server zone not found (*ServerZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/location_zones/: + get: + tags: + - HTTP Location Zones + - Method GET + summary: Return status of all HTTP location zones + description: Returns status information for each HTTP + [location zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone_location). + operationId: getHttpLocationZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of location zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLocationZonesMap' + '/http/location_zones/{httpLocationZoneName}': + parameters: + - name: httpLocationZoneName + in: path + description: The name of an HTTP [location zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone_location). + type: string + required: true + get: + tags: + - HTTP Location Zones + - Method GET + summary: Return status of an HTTP location zone + description: Returns status of a particular + HTTP [location zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone_location). + operationId: getHttpLocationZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the location zone will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLocationZone' + '404': + description: Location zone not found (*LocationZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Location Zones + - Method DELETE + summary: Reset statistics for a location zone. + description: Resets statistics of accepted and discarded requests, responses, + received and sent bytes in a particular location zone. + operationId: deleteHttpLocationZoneStat + responses: + '204': + description: Success + '404': + description: Location zone not found (*LocationZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/caches/: + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of all caches + description: Returns status of each cache configured by + [proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) + and other “*_cache_path” directives. + operationId: getHttpCaches + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of cache zones will be output. + If the “fields” value is empty, + then only names of cache zones will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCachesMap' + '/http/caches/{httpCacheZoneName}': + parameters: + - name: httpCacheZoneName + in: path + description: The name of the cache zone. + type: string + required: true + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of a cache + description: Returns status of a particular cache. + operationId: getHttpCacheZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the cache zone will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCache' + '404': + description: Cache not found (*CacheNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Caches + - Method DELETE + summary: Reset cache statistics + description: Resets statistics of cache hits/misses in a particular cache zone. + operationId: deleteHttpCacheZoneStat + responses: + '204': + description: Success + '404': + description: Cache not found (*CacheNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/upstreams/: + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of all HTTP upstream server groups + description: Returns status of each HTTP upstream server group + and its servers. + operationId: getHttpUpstreams + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of upstream server groups will be output. + If the “fields” value is empty, + only names of upstreams will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamMap' + '/http/upstreams/{httpUpstreamName}/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an HTTP upstream server group. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of an HTTP upstream server group + description: Returns status of a particular HTTP upstream server group + and its servers. + operationId: getHttpUpstreamName + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Upstreams + - Method DELETE + summary: Reset statistics of an HTTP upstream server group + description: Resets the statistics for each upstream server + in an upstream server group and queue statistics. + operationId: deleteHttpUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/servers/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an upstream server group. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of all servers in an HTTP upstream server group + description: Returns configuration of each server + in a particular HTTP upstream server group. + operationId: getHttpUpstreamServers + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServerMap' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - HTTP Upstreams + - Method POST + summary: Add a server to an HTTP upstream server group + description: Adds a new server to an HTTP upstream server group. 
+ Server parameters are specified in the JSON format. + operationId: postHttpUpstreamServer + produces: + - application/json + parameters: + - in: body + name: postHttpUpstreamServer + description: Address of a new server and other optional parameters + in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. + required: true + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + responses: + '201': + description: Created + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + missing “*server*” argument (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + service upstream “*host*” may not have port (*UpstreamBadAddress*), + service upstream “*host*” requires domain name (*UpstreamBadAddress*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + route is too long (*UpstreamBadRoute*), + “*service*” is empty (*UpstreamBadService*), + no resolver defined to resolve (*UpstreamConfNoResolver*), + upstream “**name**” has no backup (*UpstreamNoBackup*), + upstream “**name**” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/servers/{httpUpstreamServerId}': + parameters: + - name: 
httpUpstreamName + in: path + description: The name of the upstream server group. + required: true + type: string + - name: httpUpstreamServerId + in: path + description: The ID of the server. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of a server in an HTTP upstream server group + description: Returns configuration of a particular server + in the HTTP upstream server group. + operationId: getHttpUpstreamPeer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - HTTP Upstreams + - Method PATCH + summary: Modify a server in an HTTP upstream server group + description: Modifies settings of a particular server + in an HTTP upstream server group. + Server parameters are specified in the JSON format. + operationId: patchHttpUpstreamPeer + produces: + - application/json + parameters: + - in: body + name: patchHttpUpstreamServer + description: Server parameters, specified in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + invalid “*server*” argument (*UpstreamBadAddress*), + invalid server ID (*UpstreamBadServerId*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + route is too long (*UpstreamBadRoute*), + “*service*” is empty (*UpstreamBadService*), + server “**ID**” address is immutable (*UpstreamServerImmutable*), + server “*ID*” weight is immutable (*UpstreamServerWeightImmutable*), + upstream “*name*” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Upstreams + - Method DELETE + summary: Remove a server from an HTTP upstream server group + description: Removes a server from an HTTP upstream server group. 
+ operationId: deleteHttpUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/keyvals/: + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from all HTTP keyval zones + description: Returns key-value pairs for each HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + operationId: getHttpKeyvalZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: If the “fields” value is empty, + then only HTTP keyval zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZonesMap' + '/http/keyvals/{httpKeyvalZoneName}': + parameters: + - name: httpKeyvalZoneName + in: path + description: The name of an HTTP keyval shared memory zone. + required: true + type: string + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from an HTTP keyval zone + description: Returns key-value pairs stored in a particular HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + operationId: getHttpKeyvalZoneKeysValues + produces: + - application/json + parameters: + - name: key + in: query + type: string + description: Get a particular key-value pair from the HTTP keyval zone. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZone' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - HTTP Keyvals + - Method POST + summary: Add a key-value pair to the HTTP keyval zone + description: Adds a new key-value pair to the HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + Several key-value pairs can be entered + if the HTTP keyval shared memory zone is empty. + operationId: postHttpKeyvalZoneData + produces: + - application/json + parameters: + - in: body + name: Key-value + description: A key-value pair is specified in the JSON format. + Several key-value pairs can be entered + if the HTTP keyval shared memory zone is empty. + Expiration time in milliseconds can be specified for a key-value pair + with the *expire* parameter + which overrides the [*timeout*](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_timeout) + parameter of the + keyval_zone + directive. 
+ required: true + schema: + $ref: '#/definitions/NginxHTTPKeyvalZonePostPatch' + responses: + '201': + description: Created + '400': + description: | + Invalid JSON (*KeyvalFormatError*), + invalid key format (*KeyvalFormatError*), + key required (*KeyvalFormatError*), + keyval timeout is not enabled (*KeyvalFormatError*), + only one key can be added (*KeyvalFormatError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: Keyval not found (*KeyvalNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '409': + description: Key already exists (*KeyvalKeyExists*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - HTTP Keyvals + - Method PATCH + summary: Modify a key-value or delete a key + description: Changes the value of the selected key in the key-value pair, + deletes a key by setting the key value to null, + changes expiration time of a key-value pair. + If + synchronization + of keyval zones in a cluster is enabled, + deletes a key only on a target cluster node. + Expiration time in milliseconds can be specified for a key-value pair + with the *expire* parameter + which overrides the [*timeout*](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_timeout) + parameter of the + keyval_zone + directive. + operationId: patchHttpKeyvalZoneKeyValue + produces: + - application/json + parameters: + - in: body + name: httpKeyvalZoneKeyValue + description: A new value for the key is specified in the JSON format. 
+ required: true + schema: + $ref: '#/definitions/NginxHTTPKeyvalZonePostPatch' + responses: + '204': + description: Success + '400': + description: | + Invalid JSON (*KeyvalFormatError*), + key required (*KeyvalFormatError*), + keyval timeout is not enabled (*KeyvalFormatError*), + only one key can be updated (*KeyvalFormatError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Keyvals + - Method DELETE + summary: Empty the HTTP keyval zone + description: Deletes all key-value pairs from the HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + If + synchronization + of keyval zones in a cluster is enabled, + empties the keyval zone only on a target cluster node. + operationId: deleteHttpKeyvalZoneData + responses: + '204': + description: Success + '404': + description: Keyval not found (*KeyvalNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/: + get: + tags: + - Stream + summary: Return list of stream-related endpoints + description: Returns a list of first level stream endpoints. + operationId: getStream + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + /stream/server_zones/: + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of all stream server zones + description: Returns status information for each stream + [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone). 
+ operationId: getStreamServerZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZonesMap' + '/stream/server_zones/{streamServerZoneName}': + parameters: + - name: streamServerZoneName + in: path + description: The name of a stream server zone. + type: string + required: true + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of a stream server zone + description: Returns status of a particular stream server zone. + operationId: getStreamServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZone' + '404': + description: | + Server zone not found (*ServerZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Server Zones + - Method DELETE + summary: Reset statistics for a stream server zone + description: Resets statistics of accepted and discarded connections, sessions, + received and sent bytes in a particular stream server zone. + operationId: deleteStreamServerZoneStat + responses: + '204': + description: Success + '404': + description: | + Server zone not found (*ServerZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/upstreams/: + get: + tags: + - Stream Upstreams + - Method GET + summary: Return status of all stream upstream server groups + description: Returns status of each stream upstream server group + and its servers. 
+ operationId: getStreamUpstreams + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of upstream server groups will be output. + If the “fields” value is empty, + only names of upstreams will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamMap' + '/stream/upstreams/{streamUpstreamName}/': + parameters: + - name: streamUpstreamName + in: path + description: The name of a stream upstream server group. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return status of a stream upstream server group + description: Returns status of a particular stream upstream server group + and its servers. + operationId: getStreamUpstream + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Upstreams + - Method DELETE + summary: Reset statistics of a stream upstream server group + description: Resets the statistics for each upstream server + in an upstream server group. 
+ operationId: deleteStreamUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/servers/': + parameters: + - name: streamUpstreamName + in: path + description: The name of an upstream server group. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of all servers in a stream upstream server group + description: Returns configuration of each server + in a particular stream upstream server group. + operationId: getStreamUpstreamServers + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - Stream Upstreams + - Method POST + summary: Add a server to a stream upstream server group + description: Adds a new server to a stream upstream server group. + Server parameters are specified in the JSON format. + operationId: postStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: postStreamUpstreamServer + description: Address of a new server and other optional parameters + in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + responses: + '201': + description: Created + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + missing “*server*” argument (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + no port in server “*host*” (*UpstreamBadAddress*), + service upstream “*host*” may not have port (*UpstreamBadAddress*), + service upstream “*host*” requires domain name (*UpstreamBadAddress*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + “*service*” is empty (*UpstreamBadService*), + no resolver defined to resolve (*UpstreamConfNoResolver*), + upstream “**name**” has no backup (*UpstreamNoBackup*), + upstream “**name**” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/servers/{streamUpstreamServerId}': + parameters: + - name: streamUpstreamName + in: path + description: The name of the upstream server group. + required: true + type: string + - name: streamUpstreamServerId + in: path + description: The ID of the server. 
+ required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of a server in a stream upstream server group + description: Returns configuration of a particular server + in the stream upstream server group. + operationId: getStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - Stream Upstreams + - Method PATCH + summary: Modify a server in a stream upstream server group + description: Modifies settings of a particular server + in a stream upstream server group. + Server parameters are specified in the JSON format. + operationId: patchStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: patchStreamUpstreamServer + description: Server parameters, specified in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + invalid “*server*” argument (*UpstreamBadAddress*), + no port in server “*host*” (*UpstreamBadAddress*), + invalid server ID (*UpstreamBadServerId*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + “*service*” is empty (*UpstreamBadService*), + server “**ID**” address is immutable (*UpstreamServerImmutable*), + server “**ID**” weight is immutable (*UpstreamServerWeightImmutable*), + upstream “*name*” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Upstreams + - Method DELETE + summary: Remove a server from a stream upstream server group + description: Removes a server from a stream server group. 
+ operationId: deleteStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/keyvals/: + get: + tags: + - Stream Keyvals + - Method GET + summary: Return key-value pairs from all stream keyval zones + description: Returns key-value pairs for each stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + operationId: getStreamKeyvalZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: If the “fields” value is empty, + then only stream keyval zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamKeyvalZonesMap' + '/stream/keyvals/{streamKeyvalZoneName}': + parameters: + - name: streamKeyvalZoneName + in: path + description: The name of a stream keyval shared memory zone. + required: true + type: string + get: + tags: + - Stream Keyvals + - Method GET + summary: Return key-value pairs from a stream keyval zone + description: Returns key-value pairs stored in a particular stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). 
+ operationId: getStreamKeyvalZoneKeysValues + produces: + - application/json + parameters: + - name: key + in: query + type: string + description: Get a particular key-value pair from the stream keyval zone. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamKeyvalZone' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - Stream Keyvals + - Method POST + summary: Add a key-value pair to the stream keyval zone + description: Adds a new key-value pair to the stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + Several key-value pairs can be entered + if the stream keyval shared memory zone is empty. + operationId: postStreamKeyvalZoneData + produces: + - application/json + parameters: + - in: body + name: Key-value + description: A key-value pair is specified in the JSON format. + Several key-value pairs can be entered + if the stream keyval shared memory zone is empty. + Expiration time in milliseconds can be specified for a key-value pair + with the *expire* parameter + which overrides the [*timeout*](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_timeout) + parameter of the + keyval_zone + directive. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamKeyvalZonePostPatch' + responses: + '201': + description: Created + '400': + description: | + Invalid JSON (*KeyvalFormatError*), + invalid key format (*KeyvalFormatError*), + key required (*KeyvalFormatError*), + keyval timeout is not enabled (*KeyvalFormatError*), + only one key can be added (*KeyvalFormatError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Keyval not found (*KeyvalNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '409': + description: Key already exists (*KeyvalKeyExists*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - Stream Keyvals + - Method PATCH + summary: Modify a key-value or delete a key + description: Changes the value of the selected key in the key-value pair, + deletes a key by setting the key value to null, + changes expiration time of a key-value pair. + If + synchronization + of keyval zones in a cluster is enabled, + deletes a key only on a target cluster node. + Expiration time is specified in milliseconds + with the *expire* parameter + which overrides the [*timeout*](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_timeout) + parameter of the + keyval_zone + directive. + operationId: patchStreamKeyvalZoneKeyValue + produces: + - application/json + parameters: + - in: body + name: streamKeyvalZoneKeyValue + description: A new value for the key is specified in the JSON format. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamKeyvalZonePostPatch' + responses: + '204': + description: Success + '400': + description: | + Invalid JSON (*KeyvalFormatError*), + key required (*KeyvalFormatError*), + keyval timeout is not enabled (*KeyvalFormatError*), + only one key can be updated (*KeyvalFormatError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Keyvals + - Method DELETE + summary: Empty the stream keyval zone + description: Deletes all key-value pairs from the stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + If + synchronization + of keyval zones in a cluster is enabled, + empties the keyval zone only on a target cluster node. + operationId: deleteStreamKeyvalZoneData + responses: + '204': + description: Success + '404': + description: | + Keyval not found (*KeyvalNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/zone_sync/: + get: + tags: + - Stream Zone Sync + - Method GET + summary: Return sync status of a node + description: Returns synchronization status of a cluster node. 
+ operationId: getStreamZoneSync + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamZoneSync' + /resolvers/: + get: + tags: + - Resolvers + - Method GET + summary: Return status for all resolver zones + description: Returns status information for each + [resolver zone](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver_status_zone). + operationId: getResolverZones + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxResolverZonesMap' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of resolvers statistics will be output. + '/resolvers/{resolverZoneName}': + parameters: + - name: resolverZoneName + in: path + description: The name of a resolver zone. + required: true + type: string + get: + tags: + - Resolvers + - Method GET + summary: Return statistics of a resolver zone + description: Returns statistics stored in a particular resolver + [zone](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver_status_zone). + operationId: getResolverZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the resolver zone will be output + (requests, responses, or both). + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxResolverZone' + '404': + description: | + Resolver zone not found (*ResolverZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Resolvers + - Method DELETE + summary: Reset statistics for a resolver zone. + description: Resets statistics in a particular resolver zone. 
+ operationId: deleteResolverZoneStat + responses: + '204': + description: Success + '404': + description: Resolver zone not found (*ResolverZoneNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /ssl: + get: + tags: + - SSL + - Method GET + summary: Return SSL statistics + description: Returns SSL statistics. + operationId: getSsl + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSSLObject' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of SSL statistics will be output. + delete: + tags: + - SSL + - Method DELETE + summary: Reset SSL statistics + description: Resets counters of SSL handshakes and session reuses. + operationId: deleteSslStat + responses: + '204': + description: Success +### +###DEFINITIONS +### +definitions: + ArrayOfStrings: + title: Array + description: | + An array of strings. + type: array + items: + type: string + NginxObject: + title: nginx + description: | + General information about nginx: + type: object + properties: + version: + type: string + description: Version of nginx. + build: + type: string + description: Name of nginx build. + address: + type: string + description: The address of the server that accepted status request. + generation: + type: integer + description: The total number of configuration + reloads. + load_timestamp: + type: string + format: date-time + description: Time of the last reload of configuration, + in the ISO 8601 format with millisecond resolution. + timestamp: + type: string + format: date-time + description: Current time + in the ISO 8601 format with millisecond resolution. + pid: + type: integer + description: The ID of the worker process that handled status request. + ppid: + type: integer + description: The ID of the master process that started the + worker process. 
+ example: + nginx: + version: 1.17.3 + build: nginx-plus-r19 + address: 206.251.255.64 + generation: 6 + load_timestamp: 2019-10-01T11:15:44.467Z + timestamp: 2019-10-01T09:26:07.305Z + pid: 32212 + ppid: 32210 + NginxProcesses: + title: Processes + type: object + properties: + respawned: + type: integer + description: The total number of abnormally terminated + and respawned child processes. + example: + respawned: 0 + NginxConnections: + title: Connections + description: | + The number of accepted, dropped, active, and idle connections. + type: object + properties: + accepted: + type: integer + description: The total number of accepted client connections. + dropped: + type: integer + description: The total number of dropped client connections. + active: + type: integer + description: The current number of active client connections. + idle: + type: integer + description: The current number of idle client connections. + example: + accepted: 4968119 + dropped: 0 + active: 5 + idle: 117 + NginxSSLObject: + title: SSL + type: object + properties: + handshakes: + type: integer + description: The total number of successful SSL handshakes. + handshakes_failed: + type: integer + description: The total number of failed SSL handshakes. + session_reuses: + type: integer + description: The total number of session reuses during SSL handshake. + example: + handshakes: 79572 + handshakes_failed: 21025 + session_reuses: 15762 + NginxSlabZoneMap: + title: Slab Zones + description: | + Status zones that use slab allocator. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxSlabZone' + example: + http_cache: + pages: + used: 2 + free: 2452 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 1 + free: 126 + reqs: 1 + fails: 0 + 64: + used: 2 + free: 62 + reqs: 2 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + dns-backends: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZone: + title: Shared memory zone with slab allocator + description: | + type: object + properties: + pages: + type: object + description: The number of free and used memory pages. + properties: + used: + type: integer + description: The current number of used memory pages. + free: + type: integer + description: The current number of free memory pages. + slots: + type: object + title: Memory Slots + description: Status data for memory slots (8, 16, 32, 64, 128, etc.) + additionalProperties: + $ref: '#/definitions/NginxSlabZoneSlot' + example: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZoneSlot: + title: Memory Slot + type: object + properties: + used: + type: integer + description: The current number of used memory slots. + free: + type: integer + description: The current number of free memory slots. + reqs: + type: integer + description: The total number of attempts + to allocate memory of specified size. 
+ fails: + type: integer + description: The number of unsuccessful attempts + to allocate memory of specified size. + NginxHTTPRequests: + title: HTTP Requests + type: object + properties: + total: + type: integer + description: The total number of client requests. + current: + type: integer + description: The current number of client requests. + example: + total: 10624511 + current: 4 + NginxHTTPServerZonesMap: + title: HTTP Server Zones + description: | + Status data for all HTTP + status zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPServerZone' + example: + site1: + processing: 2 + requests: 736395 + responses: + 1xx: 0 + 2xx: 727290 + 3xx: 4614 + 4xx: 934 + 5xx: 1535 + total: 734373 + discarded: 2020 + received: 180157219 + sent: 20183175459 + site2: + processing: 1 + requests: 185307 + responses: + 1xx: 0 + 2xx: 112674 + 3xx: 45383 + 4xx: 2504 + 5xx: 4419 + total: 164980 + discarded: 20326 + received: 51575327 + sent: 2983241510 + NginxHTTPServerZone: + title: HTTP Server Zone + type: object + properties: + processing: + type: integer + description: The number of client requests + that are currently being processed. + requests: + type: integer + description: The total number of client requests received from clients. + responses: + description: The total number of responses sent to clients and the + number of responses with status codes + “1xx”, “2xx”, “3xx”, + “4xx”, and “5xx”. + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. 
+ readOnly: true + total: + type: integer + description: The total number of responses sent to clients. + readOnly: true + discarded: + type: integer + description: The total number of + requests completed without sending a response. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + processing: 1 + requests: 706690 + responses: + 1xx: 0 + 2xx: 699482 + 3xx: 4522 + 4xx: 907 + 5xx: 266 + total: 705177 + discarded: 1513 + received: 172711587 + sent: 19415530115 + NginxHTTPLocationZonesMap: + title: HTTP Location Zones + description: | + Status data for all HTTP + location zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPLocationZone' + example: + site1: + requests: 736395 + responses: + 1xx: 0 + 2xx: 727290 + 3xx: 4614 + 4xx: 934 + 5xx: 1535 + total: 734373 + discarded: 2020 + received: 180157219 + sent: 20183175459 + site2: + requests: 185307 + responses: + 1xx: 0 + 2xx: 112674 + 3xx: 45383 + 4xx: 2504 + 5xx: 4419 + total: 164980 + discarded: 20326 + received: 51575327 + sent: 2983241510 + NginxHTTPLocationZone: + title: HTTP Location Zone + type: object + properties: + requests: + type: integer + description: The total number of client requests received from clients. + responses: + description: The total number of responses sent to clients and the + number of responses with status codes + “1xx”, “2xx”, “3xx”, + “4xx”, and “5xx”. + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. 
+ readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. + readOnly: true + total: + type: integer + description: The total number of responses sent to clients. + readOnly: true + discarded: + type: integer + description: The total number of + requests completed without sending a response. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + requests: 706690 + responses: + 1xx: 0 + 2xx: 699482 + 3xx: 4522 + 4xx: 907 + 5xx: 266 + total: 705177 + discarded: 1513 + received: 172711587 + sent: 19415530115 + NginxHTTPCachesMap: + title: HTTP Caches + description: | + Status information of all HTTP caches configured by + proxy_cache_path + and other “*_cache_path” directives. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPCache' + example: + http-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + frontend-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + NginxHTTPCache: + title: HTTP Cache + type: object + properties: + size: + 
type: integer
+ description: The current size of the cache.
+ max_size:
+ type: integer
+ description: The limit on the maximum size of the cache
+ specified in the configuration.
+ cold:
+ type: boolean
+ description: A boolean value indicating whether the “cache loader” process
+ is still loading data from disk into the cache.
+ hit:
+ type: object
+ properties:
+ responses:
+ type: integer
+ description: The total number of
+ valid
+ responses read from the cache.
+ bytes:
+ type: integer
+ description: The total number of bytes read from the cache.
+ stale:
+ type: object
+ properties:
+ responses:
+ type: integer
+ description: The total number of expired responses read from the cache (see
+ proxy_cache_use_stale
+ and other “*_cache_use_stale” directives).
+ bytes:
+ type: integer
+ description: The total number of bytes read from the cache.
+ updating:
+ type: object
+ properties:
+ responses:
+ type: integer
+ description: The total number of expired responses read from the cache
+ while responses were being updated (see
+ proxy_cache_use_stale
+ and other “*_cache_use_stale” directives).
+ bytes:
+ type: integer
+ description: The total number of bytes read from the cache.
+ revalidated:
+ type: object
+ properties:
+ responses:
+ type: integer
+ description: The total number of expired and revalidated responses
+ read from the cache (see
+ proxy_cache_revalidate
+ and other “*_cache_revalidate” directives).
+ bytes:
+ type: integer
+ description: The total number of bytes read from the cache.
+ miss:
+ type: object
+ properties:
+ responses:
+ type: integer
+ description: The total number of responses not found in the cache.
+ bytes:
+ type: integer
+ description: The total number of bytes read from the proxied server.
+ responses_written:
+ type: integer
+ description: The total number of responses written to the cache.
+ bytes_written:
+ type: integer
+ description: The total number of bytes written to the cache. 
+ expired: + type: object + properties: + responses: + type: integer + description: The total number of expired responses not taken from the cache. + bytes: + type: integer + description: The total number of bytes read from the proxied server. + responses_written: + type: integer + description: The total number of responses written to the cache. + bytes_written: + type: integer + description: The total number of bytes written to the cache. + bypass: + type: object + properties: + responses: + type: integer + description: The total number of responses not looked up in the cache due to the + proxy_cache_bypass + and other “*_cache_bypass” directives. + bytes: + type: integer + description: The total number of bytes read from the proxied server. + responses_written: + type: integer + description: The total number of responses written to the cache. + bytes_written: + type: integer + description: The total number of bytes written to the cache. + example: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + NginxHTTPUpstreamMap: + title: HTTP Upstreams + description: | + Status information of all HTTP + dynamically configurable + groups. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPUpstream' + example: + trac-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + keepalive: 0 + zombies: 0 + zone: trac-backend + hg-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + keepalive: 0 
+ zombies: 0 + zone: hg-backend + NginxHTTPUpstream: + title: HTTP Upstream + type: object + properties: + peers: + $ref: '#/definitions/NginxHTTPUpstreamPeerMap' + keepalive: + type: integer + description: The current number of idle + keepalive + connections. + zombies: + type: integer + description: The current number of servers removed + from the group but still processing active client requests. + zone: + type: string + description: The name of the shared memory + zone + that keeps the group’s configuration and run-time state. + queue: + type: object + description: > + For the requests + queue, + the following data are provided: + properties: + size: + type: integer + description: The current number of requests in the queue. + max_size: + type: integer + description: The maximum number of requests that can be in the queue + at the same time. + overflows: + type: integer + description: The total number of requests rejected due to the queue overflow. + example: + upstream_backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 20 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 20 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + keepalive: 0 + zombies: 0 + zone: upstream_backend + 
NginxHTTPUpstreamPeerMap: + title: HTTP Upstream Servers + description: | + An array of HTTP + upstream servers. + type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamPeer' + NginxHTTPUpstreamPeer: + title: HTTP Upstream Server + type: object + properties: + id: + type: integer + description: The ID of the server. + readOnly: true + server: + type: string + description: An address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. + name: + type: string + description: The name of the server specified in the + server + directive. + readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. + state: + type: string + enum: + - up + - draining + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “draining”, “down”, + “unavail”, “checking”, + and “unhealthy”. + active: + type: integer + description: The current number of active connections. + readOnly: true + max_conns: + type: integer + description: The + max_conns + limit for the server. + requests: + type: integer + description: The total number of client requests forwarded to this server. + readOnly: true + responses: + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. 
+ readOnly: true + total: + type: integer + description: The total number of responses obtained from this server. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. + readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client requests + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. + readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. + fails: + type: integer + description: The number of failed health checks. + unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). + last_passed: + type: boolean + description: Boolean indicating if the last health check request was successful + and passed + tests. + downtime: + type: integer + readOnly: true + description: Total time the server was in the “unavail”, + “checking”, and “unhealthy” states. + downstart: + type: string + format: date-time + readOnly: true + description: The time when the server became + “unavail”, “checking”, + or “unhealthy”, + in the ISO 8601 format with millisecond resolution. + selected: + type: string + format: date-time + readOnly: true + description: The time when the server was last selected to process a request, + in the ISO 8601 format with millisecond resolution. + header_time: + type: integer + readOnly: true + description: The average time to get the + response header + from the server. + response_time: + type: integer + readOnly: true + description: The average time to get the + full response + from the server. 
+ NginxHTTPUpstreamConfServerMap: + title: HTTP Upstream Servers + description: An array of HTTP upstream servers for dynamic configuration. + type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:8088 + weight: 1 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: false + down: false + - id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPUpstreamConfServer: + title: HTTP Upstream Server + description: | + Dynamically configurable parameters of an HTTP upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the HTTP upstream server. + The ID is assigned automatically and cannot be changed. + readOnly: true + server: + type: string + description: Same as the + address + parameter of the HTTP upstream server. + When adding a server, it is possible to specify it as a domain name. + In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “http” block. + See also the + resolve + parameter of the HTTP upstream server. + service: + type: string + description: Same as the + service + parameter of the HTTP upstream server. + This parameter cannot be changed. + readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the HTTP upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the HTTP upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the HTTP upstream server. + fail_timeout: + type: string + description: Same as the + fail_timeout + parameter of the HTTP upstream server. 
+ slow_start: + type: string + description: Same as the + slow_start + parameter of the HTTP upstream server. + route: + type: string + description: Same as the + route + parameter of the HTTP upstream server. + backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the HTTP upstream server. + drain: + type: boolean + description: Same as the + drain + parameter of the HTTP upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. + readOnly: true + example: + id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPKeyvalZonesMap: + title: HTTP Keyval Shared Memory Zones + description: | + Contents of all HTTP keyval shared memory zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPKeyvalZone' + example: + keyval_zone: + key1: value1 + key2: value2 + key3: value3 + one: + arg1: value1 + arg2: value2 + arg3: value3 + NginxHTTPKeyvalZone: + title: HTTP Keyval Shared Memory Zone + description: | + Contents of an HTTP keyval shared memory zone + when using the GET method. + type: object + example: + key1: value1 + key2: value2 + key3: value3 + NginxHTTPKeyvalZonePostPatch: + title: HTTP Keyval Shared Memory Zone + description: | + Contents of an HTTP keyval shared memory zone + when using the POST or PATCH methods. + type: object + example: + key1: value1 + key2: value2 + key3: + value: value3 + expire: 30000 + NginxStreamServerZonesMap: + title: Stream Server Zones + description: | + Status information for all stream + status zones. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxStreamServerZone' + example: + mysql-frontend: + processing: 2 + connections: 270925 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 270925 + discarded: 0 + received: 28988975 + sent: 3879346317 + dns: + processing: 1 + connections: 155569 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 155569 + discarded: 0 + received: 4200363 + sent: 20489184 + NginxStreamServerZone: + title: Stream Server Zone + type: object + properties: + processing: + type: integer + description: The number of client connections + that are currently being processed. + connections: + type: integer + description: The total number of connections accepted from clients. + sessions: + type: object + description: The total number of completed sessions, + and the number of sessions completed with status codes + “2xx”, “4xx”, or “5xx”. + properties: + 2xx: + type: integer + description: The total number of sessions completed with + status codes + “2xx”. + 4xx: + type: integer + description: The total number of sessions completed with + status codes + “4xx”. + 5xx: + type: integer + description: The total number of sessions completed with + status codes + “5xx”. + total: + type: integer + description: The total number of completed client sessions. + discarded: + type: integer + description: The total number of + connections completed without creating a session. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + dns: + processing: 1 + connections: 155569 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 155569 + discarded: 0 + received: 4200363 + sent: 20489184 + NginxStreamUpstreamMap: + title: Stream Upstreams + description: Status information of stream upstream server groups. 
+ type: object
+ additionalProperties:
+ $ref: '#/definitions/NginxStreamUpstream'
+ example:
+ mysql_backends:
+ peers:
+ - id: 0
+ server: 10.0.0.1:12345
+ name: 10.0.0.1:12345
+ backup: false
+ weight: 5
+ state: up
+ active: 0
+ max_conns: 30
+ connections: 1231
+ sent: 251946292
+ received: 19222475454
+ fails: 0
+ unavail: 0
+ health_checks:
+ checks: 26214
+ fails: 0
+ unhealthy: 0
+ last_passed: true
+ downtime: 0
+ downstart: 2019-10-01T11:09:21.602Z
+ selected: 2019-10-01T15:01:25Z
+ - id: 1
+ server: 10.0.0.1:12346
+ name: 10.0.0.1:12346
+ backup: true
+ weight: 1
+ state: unhealthy
+ active: 0
+ max_conns: 30
+ connections: 0
+ sent: 0
+ received: 0
+ fails: 0
+ unavail: 0
+ health_checks:
+ checks: 26284
+ fails: 26284
+ unhealthy: 1
+ last_passed: false
+ downtime: 262925617
+ downstart: 2019-10-01T11:09:21.602Z
+ selected: 2019-10-01T15:01:25Z
+ zombies: 0
+ zone: mysql_backends
+ dns:
+ peers:
+ - id: 0
+ server: 10.0.0.1:12347
+ name: 10.0.0.1:12347
+ backup: false
+ weight: 5
+ state: up
+ active: 0
+ max_conns: 30
+ connections: 667231
+ sent: 251946292
+ received: 19222475454
+ fails: 0
+ unavail: 0
+ health_checks:
+ checks: 26214
+ fails: 0
+ unhealthy: 0
+ last_passed: true
+ downtime: 0
+ downstart: 2019-10-01T11:09:21.602Z
+ selected: 2019-10-01T15:01:25Z
+ - id: 1
+ server: 10.0.0.1:12348
+ name: 10.0.0.1:12348
+ backup: true
+ weight: 1
+ state: unhealthy
+ active: 0
+ connections: 0
+ max_conns: 30
+ sent: 0
+ received: 0
+ fails: 0
+ unavail: 0
+ health_checks:
+ checks: 26284
+ fails: 26284
+ unhealthy: 1
+ last_passed: false
+ downtime: 262925617
+ downstart: 2019-10-01T11:09:21.602Z
+ selected: 2019-10-01T15:01:25Z
+ zombies: 0
+ zone: dns
+ NginxStreamUpstream:
+ title: Stream Upstream
+ type: object
+ properties:
+ peers:
+ $ref: '#/definitions/NginxStreamUpstreamPeerMap'
+ zombies:
+ type: integer
+ description: The current number of servers removed from the group
+ but still processing active client connections. 
+ zone: + type: string + description: The name of the shared memory + zone + that keeps the group’s configuration and run-time state. + example: + dns: + peers: + - id: 0 + server: 10.0.0.1:12347 + name: 10.0.0.1:12347 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 50 + connections: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + - id: 1 + server: 10.0.0.1:12348 + name: 10.0.0.1:12348 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 50 + connections: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + zombies: 0 + zone: dns + NginxStreamUpstreamPeerMap: + title: Stream Upstream Servers + description: Array of stream upstream servers. + type: array + items: + $ref: '#/definitions/NginxStreamUpstreamPeer' + NginxStreamUpstreamPeer: + title: Stream Upstream Server + type: object + properties: + id: + type: integer + description: The ID of the server. + readOnly: true + server: + type: string + description: An + address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. + name: + type: string + format: hostname + description: The name of the server specified in the + server + directive. + readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. + state: + type: string + readOnly: true + enum: + - up + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “down”, “unavail”, + “checking”, or “unhealthy”. 
+ active: + type: integer + description: The current number of connections. + readOnly: true + max_conns: + type: integer + description: The + max_conns + limit for the server. + connections: + type: integer + description: The total number of client connections forwarded to this server. + readOnly: true + connect_time: + type: integer + description: The average time to connect to the upstream server. + readOnly: true + first_byte_time: + type: integer + description: The average time to receive the first byte of data. + readOnly: true + response_time: + type: integer + description: The average time to receive the last byte of data. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. + readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client connections + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. + readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. + readOnly: true + fails: + type: integer + description: The number of failed health checks. + readOnly: true + unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). + readOnly: true + last_passed: + type: boolean + description: Boolean indicating whether the last health check request + was successful and passed + tests. + readOnly: true + downtime: + type: integer + description: Total time the server was in the + “unavail”, “checking”, + and “unhealthy” states. 
+ readOnly: true + downstart: + type: string + format: date-time + description: The time when the server became + “unavail”, “checking”, + or “unhealthy”, + in the ISO 8601 format with millisecond resolution. + readOnly: true + selected: + type: string + format: date-time + description: The time when the server was last selected + to process a connection, + in the ISO 8601 format with millisecond resolution. + readOnly: true + NginxStreamUpstreamConfServerMap: + title: Stream Upstream Servers + description: | + An array of stream upstream servers for dynamic configuration. + type: array + items: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + - id: 1 + server: 10.0.0.1:12349 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxStreamUpstreamConfServer: + title: Stream Upstream Server + description: | + Dynamically configurable parameters of a stream upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the stream upstream server. + The ID is assigned automatically and cannot be changed. + readOnly: true + server: + type: string + description: Same as the + address + parameter of the stream upstream server. + When adding a server, it is possible to specify it as a domain name. + In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “stream” block. + See also the + resolve + parameter of the stream upstream server. + service: + type: string + description: Same as the + service + parameter of the stream upstream server. + This parameter cannot be changed. 
+ readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the stream upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the stream upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the stream upstream server. + fail_timeout: + type: string + description: Same as the + fail_timeout + parameter of the stream upstream server. + slow_start: + type: string + description: Same as the + slow_start + parameter of the stream upstream server. + backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the stream upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. + readOnly: true + example: + id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxStreamKeyvalZonesMap: + title: Stream Keyval Shared Memory Zones + description: | + Contents of all stream keyval shared memory zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxStreamKeyvalZone' + example: + keyval_zone: + key1: value1 + key2: value2 + key3: value3 + one: + arg1: value1 + arg2: value2 + arg3: value3 + NginxStreamKeyvalZone: + title: Stream Keyval Shared Memory Zone + description: | + Contents of a stream keyval shared memory zone when using the GET method. 
+ type: object + example: + key1: value1 + key2: value2 + key3: value3 + NginxStreamKeyvalZonePostPatch: + title: Stream Keyval Shared Memory Zone + description: | + Contents of a stream keyval shared memory zone + when using the POST or PATCH methods. + type: object + example: + key1: value1 + key2: value2 + key3: + value: value3 + expire: 30000 + NginxStreamZoneSync: + title: Stream Zone Sync Node + type: object + properties: + zones: + type: object + title: Zone Sync Zones + description: Synchronization information per each shared memory zone. + additionalProperties: + $ref: '#/definitions/NginxStreamZoneSyncZone' + status: + type: object + description: Synchronization information per node in a cluster. + properties: + bytes_in: + type: integer + description: The number of bytes received by this node. + msgs_in: + type: integer + description: The number of messages received by this node. + msgs_out: + type: integer + description: The number of messages sent by this node. + bytes_out: + type: integer + description: The number of bytes sent by this node. + nodes_online: + type: integer + description: The number of peers this node is connected to. + example: + zones: + zone1: + records_pending: 2061 + records_total: 260575 + zone2: + records_pending: 0 + records_total: 14749 + status: + bytes_in: 1364923761 + msgs_in: 337236 + msgs_out: 346717 + bytes_out: 1402765472 + nodes_online: 15 + NginxStreamZoneSyncZone: + title: Sync Zone + description: Synchronization status of a shared memory zone. + type: object + properties: + records_pending: + type: integer + description: The number of records that need to be sent to the cluster. + records_total: + type: integer + description: The total number of records stored in the shared memory zone. + NginxResolverZonesMap: + title: Resolver Zones + description: | + Status data for all + resolver zones. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxResolverZone' + example: + resolver_zone1: + requests: + name: 25460 + srv: 130 + addr: 2580 + responses: + noerror: 26499 + formerr: 0 + servfail: 3 + nxdomain: 0 + notimp: 0 + refused: 0 + timedout: 243 + unknown: 478 + resolver_zone2: + requests: + name: 325460 + srv: 1130 + addr: 12580 + responses: + noerror: 226499 + formerr: 0 + servfail: 283 + nxdomain: 0 + notimp: 0 + refused: 0 + timedout: 743 + unknown: 1478 + NginxResolverZone: + title: Resolver Zone + description: | + Statistics of DNS requests and responses per particular + resolver zone. + type: object + properties: + requests: + type: object + readOnly: true + properties: + name: + type: integer + description: The total number of requests + to resolve names to addresses. + readOnly: true + srv: + type: integer + description: The total number of requests + to resolve SRV records. + readOnly: true + addr: + type: integer + description: The total number of requests + to resolve addresses to names. + readOnly: true + responses: + type: object + readOnly: true + properties: + noerror: + type: integer + description: The total number of successful responses. + readOnly: true + formerr: + type: integer + description: The total number of + FORMERR (Format error) responses. + readOnly: true + servfail: + type: integer + description: The total number of + SERVFAIL (Server failure) responses. + readOnly: true + nxdomain: + type: integer + description: The total number of + NXDOMAIN (Host not found) responses. + readOnly: true + notimp: + type: integer + description: The total number of + NOTIMP (Unimplemented) responses. + readOnly: true + refused: + type: integer + description: The total number of + REFUSED (Operation refused) responses. + readOnly: true + timedout: + type: integer + description: The total number of timed out requests. 
+ readOnly: true + unknown: + type: integer + description: The total number of requests + completed with an unknown error. + readOnly: true + example: + resolver_zone1: + requests: + name: 25460 + srv: 130 + addr: 2580 + responses: + noerror: 26499 + formerr: 0 + servfail: 3 + nxdomain: 0 + notimp: 0 + refused: 0 + timedout: 243 + unknown: 478 + NginxError: + title: Error + description: | + nginx error object. + type: object + properties: + error: + type: object + properties: + status: + type: integer + description: HTTP error code. + text: + type: string + description: Error description. + code: + type: string + description: Internal nginx error code. + request_id: + type: string + description: The ID of the request, equals the value of the + $request_id + variable. + href: + type: string + description: Link to reference documentation. diff --git a/content/nginx/admin-guide/yaml/v6/nginx_api.yaml b/content/nginx/admin-guide/yaml/v6/nginx_api.yaml new file mode 100644 index 000000000..9483a30ce --- /dev/null +++ b/content/nginx/admin-guide/yaml/v6/nginx_api.yaml @@ -0,0 +1,4181 @@ +swagger: '2.0' +info: + version: '6.0' + title: NGINX Plus REST API + description: NGINX Plus REST + [API](https://nginx.org/en/docs/http/ngx_http_api_module.html) + provides access to NGINX Plus status information, + on-the-fly configuration of upstream servers and + key-value pairs management for + [http](https://nginx.org/en/docs/http/ngx_http_keyval_module.html) and + [stream](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html). 
+basePath: /api/6 +tags: + - name: General Info + - name: Processes + - name: Connections + - name: Slabs + - name: Resolvers + - name: SSL + - name: HTTP + - name: HTTP Requests + - name: HTTP Server Zones + - name: HTTP Location Zones + - name: HTTP Caches + - name: HTTP Limit Conns + - name: HTTP Limit Reqs + - name: HTTP Keyvals + - name: HTTP Upstreams + - name: Stream + - name: Stream Server Zones + - name: Stream Limit Conns + - name: Stream Keyvals + - name: Stream Upstreams + - name: Stream Zone Sync + - name: Method GET + - name: Method POST + - name: Method PATCH + - name: Method DELETE +schemes: + - http +paths: + /: + get: + tags: + - General Info + - Method GET + summary: Return list of root endpoints + description: Returns a list of root endpoints. + operationId: getAPIEndpoints + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /nginx: + get: + tags: + - General Info + - Method GET + summary: Return status of nginx running instance + description: Returns nginx version, build name, address, + number of configuration reloads, IDs of master and worker processes. + operationId: getNginx + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of nginx running instance will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxObject' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /processes: + get: + tags: + - Processes + - Method GET + summary: Return nginx processes status + description: Returns the number of abnormally terminated + and respawned child processes. 
+ operationId: getProcesses + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxProcesses' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Processes + - Method DELETE + summary: Reset nginx processes statistics + description: Resets counters of abnormally terminated and respawned + child processes. + operationId: deleteProcesses + responses: + '204': + description: Success + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /connections: + get: + tags: + - Connections + - Method GET + summary: Return client connections statistics + description: Returns statistics of client connections. + operationId: getConnections + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxConnections' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the connections statistics will be output. + delete: + tags: + - Connections + - Method DELETE + summary: Reset client connections statistics + description: Resets statistics of accepted and dropped + client connections. + operationId: deleteConnections + responses: + '204': + description: Success + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /slabs/: + get: + tags: + - Slabs + - Method GET + summary: Return status of all slabs + description: Returns status of slabs + for each shared memory zone with slab allocator. 
+ operationId: getSlabs + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of slab zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZoneMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/slabs/{slabZoneName}': + parameters: + - name: slabZoneName + in: path + description: The name of the shared memory zone with slab allocator. + required: true + type: string + get: + tags: + - Slabs + - Method GET + summary: Return status of a slab + description: Returns status of slabs for a particular shared memory zone + with slab allocator. + operationId: getSlabZone + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the slab zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZone' + '404': + description: | + Slab not found (*SlabNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Slabs + - Method DELETE + summary: Reset slab statistics + description: Resets the “reqs” and “fails” + metrics for each memory slot. + operationId: deleteSlabZoneStats + responses: + '204': + description: Success + '404': + description: | + Slab not found (*SlabNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/: + get: + tags: + - HTTP + - Method GET + summary: Return list of HTTP-related endpoints + description: Returns a list of first level HTTP endpoints. 
+ operationId: getHttp + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /http/requests: + get: + tags: + - HTTP Requests + - Method GET + summary: Return HTTP requests statistics + description: Returns status of client HTTP requests. + operationId: getHttpRequests + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of client HTTP requests statistics + will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPRequests' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Requests + - Method DELETE + summary: Reset HTTP requests statistics + description: Resets the number of total client HTTP requests. + operationId: deleteHttpRequests + responses: + '204': + description: Success + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/server_zones/: + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of all HTTP server zones + description: Returns status information for each HTTP + [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone). + operationId: getHttpServerZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/server_zones/{httpServerZoneName}': + parameters: + - name: httpServerZoneName + in: path + description: The name of an HTTP server zone. + type: string + required: true + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of an HTTP server zone + description: Returns status of a particular HTTP server zone. + operationId: getHttpServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZone' + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Server Zones + - Method DELETE + summary: Reset statistics for an HTTP server zone + description: Resets statistics of accepted and discarded requests, responses, + received and sent bytes in a particular HTTP server zone. + operationId: deleteHttpServerZoneStat + responses: + '204': + description: Success + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/location_zones/: + get: + tags: + - HTTP Location Zones + - Method GET + summary: Return status of all HTTP location zones + description: Returns status information for each HTTP + [location zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone_location). 
+ operationId: getHttpLocationZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of location zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLocationZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/location_zones/{httpLocationZoneName}': + parameters: + - name: httpLocationZoneName + in: path + description: The name of an HTTP [location zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone_location). + type: string + required: true + get: + tags: + - HTTP Location Zones + - Method GET + summary: Return status of an HTTP location zone + description: Returns status of a particular + HTTP [location zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone_location). + operationId: getHttpLocationZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the location zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLocationZone' + '404': + description: | + Location zone not found (*LocationZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Location Zones + - Method DELETE + summary: Reset statistics for a location zone. + description: Resets statistics of accepted and discarded requests, responses, + received and sent bytes in a particular location zone. 
+ operationId: deleteHttpLocationZoneStat + responses: + '204': + description: Success + '404': + description: | + Location zone not found (*LocationZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/caches/: + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of all caches + description: Returns status of each cache configured by + [proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) + and other “*_cache_path” directives. + operationId: getHttpCaches + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of cache zones will be output. + If the “fields” value is empty, + then only names of cache zones will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCachesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/caches/{httpCacheZoneName}': + parameters: + - name: httpCacheZoneName + in: path + description: The name of the cache zone. + type: string + required: true + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of a cache + description: Returns status of a particular cache. + operationId: getHttpCacheZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the cache zone will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCache' + '404': + description: | + Cache not found (*CacheNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Caches + - Method DELETE + summary: Reset cache statistics + description: Resets statistics of cache hits/misses in a particular cache zone. + operationId: deleteHttpCacheZoneStat + responses: + '204': + description: Success + '404': + description: | + Cache not found (*CacheNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/limit_conns/: + get: + tags: + - HTTP Limit Conns + - Method GET + summary: Return status of all HTTP limit_conn zones + description: Returns status information for each HTTP + [limit_conn zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone). + operationId: getHttpLimitConnZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of limit_conn zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLimitConnZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/limit_conns/{httpLimitConnZoneName}': + parameters: + - name: httpLimitConnZoneName + in: path + description: The name of a + [limit_conn zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone). 
+ type: string + required: true + get: + tags: + - HTTP Limit Conns + - Method GET + summary: Return status of an HTTP limit_conn zone + description: Returns status of a particular HTTP + [limit_conn zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone). + operationId: getHttpLimitConnZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the + [limit_conn zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone) + will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLimitConnZone' + '404': + description: | + limit_conn not found (*LimitConnNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Limit Conns + - Method DELETE + summary: Reset statistics for an HTTP limit_conn zone + description: Resets the connection limiting statistics. + operationId: deleteHttpLimitConnZoneStat + responses: + '204': + description: Success + '404': + description: | + limit_conn not found (*LimitConnNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/limit_reqs/: + get: + tags: + - HTTP Limit Reqs + - Method GET + summary: Return status of all HTTP limit_req zones + description: Returns status information for each HTTP + [limit_req zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone). + operationId: getHttpLimitReqZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of limit_req zones will be output. + If the “fields” value is empty, + then only zone names will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLimitReqZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/limit_reqs/{httpLimitReqZoneName}': + parameters: + - name: httpLimitReqZoneName + in: path + description: The name of a + [limit_req zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone). + type: string + required: true + get: + tags: + - HTTP Limit Reqs + - Method GET + summary: Return status of an HTTP limit_req zone + description: Returns status of a particular HTTP + [limit_req zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone). + operationId: getHttpLimitReqZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the + [limit_req zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone) + will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLimitReqZone' + '404': + description: | + limit_req not found (*LimitReqNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Limit Reqs + - Method DELETE + summary: Reset statistics for an HTTP limit_req zone + description: Resets the requests limiting statistics. + operationId: deleteHttpLimitReqZoneStat + responses: + '204': + description: Success + '404': + description: | + limit_req not found (*LimitReqNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/upstreams/: + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of all HTTP upstream server groups + description: Returns status of each HTTP upstream server group + and its servers. 
+ operationId: getHttpUpstreams + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of upstream server groups will be output. + If the “fields” value is empty, + only names of upstreams will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an HTTP upstream server group. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of an HTTP upstream server group + description: Returns status of a particular HTTP upstream server group + and its servers. + operationId: getHttpUpstreamName + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Upstreams + - Method DELETE + summary: Reset statistics of an HTTP upstream server group + description: Resets the statistics for each upstream server + in an upstream server group and queue statistics. 
+ operationId: deleteHttpUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/servers/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an upstream server group. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of all servers in an HTTP upstream server group + description: Returns configuration of each server + in a particular HTTP upstream server group. + operationId: getHttpUpstreamServers + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServerMap' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - HTTP Upstreams + - Method POST + summary: Add a server to an HTTP upstream server group + description: Adds a new server to an HTTP upstream server group. + Server parameters are specified in the JSON format. + operationId: postHttpUpstreamServer + produces: + - application/json + parameters: + - in: body + name: postHttpUpstreamServer + description: Address of a new server and other optional parameters + in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxHTTPUpstreamConfServer'
+ responses:
+ '201':
+ description: Created
+ schema:
+ $ref: '#/definitions/NginxHTTPUpstreamConfServer'
+ '400':
+ description: |
+ Upstream is static (*UpstreamStatic*),
+ invalid “**parameter**” value (*UpstreamConfFormatError*),
+ missing “*server*” argument (*UpstreamConfFormatError*),
+ unknown parameter “**name**” (*UpstreamConfFormatError*),
+ nested object or list (*UpstreamConfFormatError*),
+ “*error*” while parsing (*UpstreamBadAddress*),
+ service upstream “*host*” may not have port (*UpstreamBadAddress*),
+ service upstream “*host*” requires domain name (*UpstreamBadAddress*),
+ invalid “*weight*” (*UpstreamBadWeight*),
+ invalid “*max_conns*” (*UpstreamBadMaxConns*),
+ invalid “*max_fails*” (*UpstreamBadMaxFails*),
+ invalid “*fail_timeout*” (*UpstreamBadFailTimeout*),
+ invalid “*slow_start*” (*UpstreamBadSlowStart*),
+ reading request body failed (*BodyReadError*),
+ route is too long (*UpstreamBadRoute*),
+ “*service*” is empty (*UpstreamBadService*),
+ no resolver defined to resolve (*UpstreamConfNoResolver*),
+ upstream “**name**” has no backup (*UpstreamNoBackup*),
+ upstream “**name**” memory exhausted (*UpstreamOutOfMemory*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Unknown version (*UnknownVersion*),
+ upstream not found (*UpstreamNotFound*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '409':
+ description: Entry exists (*EntryExists*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '/http/upstreams/{httpUpstreamName}/servers/{httpUpstreamServerId}':
+ parameters:
+ - name: httpUpstreamName
+ in: path
+ description: The name of the upstream server group.
+ required: true + type: string + - name: httpUpstreamServerId + in: path + description: The ID of the server. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of a server in an HTTP upstream server group + description: Returns configuration of a particular server + in the HTTP upstream server group. + operationId: getHttpUpstreamPeer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Server with ID “**id**” does not exist (*UpstreamServerNotFound*), + unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - HTTP Upstreams + - Method PATCH + summary: Modify a server in an HTTP upstream server group + description: Modifies settings of a particular server + in an HTTP upstream server group. + Server parameters are specified in the JSON format. + operationId: patchHttpUpstreamPeer + produces: + - application/json + parameters: + - in: body + name: patchHttpUpstreamServer + description: Server parameters, specified in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxHTTPUpstreamConfServer'
+ responses:
+ '200':
+ description: Success
+ schema:
+ $ref: '#/definitions/NginxHTTPUpstreamConfServer'
+ '400':
+ description: |
+ Upstream is static (*UpstreamStatic*),
+ invalid “**parameter**” value (*UpstreamConfFormatError*),
+ unknown parameter “**name**” (*UpstreamConfFormatError*),
+ nested object or list (*UpstreamConfFormatError*),
+ “*error*” while parsing (*UpstreamBadAddress*),
+ invalid “*server*” argument (*UpstreamBadAddress*),
+ invalid server ID (*UpstreamBadServerId*),
+ invalid “*weight*” (*UpstreamBadWeight*),
+ invalid “*max_conns*” (*UpstreamBadMaxConns*),
+ invalid “*max_fails*” (*UpstreamBadMaxFails*),
+ invalid “*fail_timeout*” (*UpstreamBadFailTimeout*),
+ invalid “*slow_start*” (*UpstreamBadSlowStart*),
+ reading request body failed (*BodyReadError*),
+ route is too long (*UpstreamBadRoute*),
+ “*service*” is empty (*UpstreamBadService*),
+ server “**ID**” address is immutable (*UpstreamServerImmutable*),
+ server “*ID*” weight is immutable (*UpstreamServerWeightImmutable*),
+ upstream “*name*” memory exhausted (*UpstreamOutOfMemory*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Server with ID “**id**” does not exist (*UpstreamServerNotFound*),
+ unknown version (*UnknownVersion*),
+ upstream not found (*UpstreamNotFound*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - HTTP Upstreams
+ - Method DELETE
+ summary: Remove a server from an HTTP upstream server group
+ description: Removes a server from an HTTP upstream server group.
+ operationId: deleteHttpUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Server with ID “**id**” does not exist (*UpstreamServerNotFound*), + unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/keyvals/: + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from all HTTP keyval zones + description: Returns key-value pairs for each HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + operationId: getHttpKeyvalZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: If the “fields” value is empty, + then only HTTP keyval zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/keyvals/{httpKeyvalZoneName}': + parameters: + - name: httpKeyvalZoneName + in: path + description: The name of an HTTP keyval shared memory zone. + required: true + type: string + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from an HTTP keyval zone + description: Returns key-value pairs stored in a particular HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). 
+ operationId: getHttpKeyvalZoneKeysValues + produces: + - application/json + parameters: + - name: key + in: query + type: string + description: Get a particular key-value pair from the HTTP keyval zone. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZone' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - HTTP Keyvals + - Method POST + summary: Add a key-value pair to the HTTP keyval zone + description: Adds a new key-value pair to the HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + Several key-value pairs can be entered + if the HTTP keyval shared memory zone is empty. + operationId: postHttpKeyvalZoneData + produces: + - application/json + parameters: + - in: body + name: Key-value + description: A key-value pair is specified in the JSON format. + Several key-value pairs can be entered + if the HTTP keyval shared memory zone is empty. + Expiration time in milliseconds can be specified for a key-value pair + with the *expire* parameter + which overrides the [*timeout*](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_timeout) + parameter of the + keyval_zone + directive. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxHTTPKeyvalZonePostPatch'
+ responses:
+ '201':
+ description: Created
+ '400':
+ description: |
+ Invalid JSON (*KeyvalFormatError*),
+ invalid key format (*KeyvalFormatError*),
+ key required (*KeyvalFormatError*),
+ keyval timeout is not enabled (*KeyvalFormatError*),
+ only one key can be added (*KeyvalFormatError*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '409':
+ description: |
+ Entry exists (*EntryExists*),
+ key already exists (*KeyvalKeyExists*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '413':
+ description: Request Entity Too Large
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ patch:
+ tags:
+ - HTTP Keyvals
+ - Method PATCH
+ summary: Modify a key-value or delete a key
+ description: Changes the value of the selected key in the key-value pair,
+ deletes a key by setting the key value to null,
+ changes expiration time of a key-value pair.
+ If
+ synchronization
+ of keyval zones in a cluster is enabled,
+ deletes a key only on a target cluster node.
+ Expiration time in milliseconds can be specified for a key-value pair
+ with the *expire* parameter
+ which overrides the [*timeout*](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_timeout)
+ parameter of the
+ keyval_zone
+ directive.
+ operationId: patchHttpKeyvalZoneKeyValue
+ produces:
+ - application/json
+ parameters:
+ - in: body
+ name: httpKeyvalZoneKeyValue
+ description: A new value for the key is specified in the JSON format.
+ required: true
+ schema:
+ $ref: '#/definitions/NginxHTTPKeyvalZonePostPatch'
+ responses:
+ '204':
+ description: Success
+ '400':
+ description: |
+ Invalid JSON (*KeyvalFormatError*),
+ key required (*KeyvalFormatError*),
+ keyval timeout is not enabled (*KeyvalFormatError*),
+ only one key can be updated (*KeyvalFormatError*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ keyval key not found (*KeyvalKeyNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '413':
+ description: Request Entity Too Large
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - HTTP Keyvals
+ - Method DELETE
+ summary: Empty the HTTP keyval zone
+ description: Deletes all key-value pairs from the HTTP keyval shared memory
+ [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone).
+ If
+ synchronization
+ of keyval zones in a cluster is enabled,
+ empties the keyval zone only on a target cluster node.
+ operationId: deleteHttpKeyvalZoneData
+ responses:
+ '204':
+ description: Success
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ /stream/:
+ get:
+ tags:
+ - Stream
+ summary: Return list of stream-related endpoints
+ description: Returns a list of first level stream endpoints.
+ operationId: getStream + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /stream/server_zones/: + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of all stream server zones + description: Returns status information for each stream + [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone). + operationId: getStreamServerZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/stream/server_zones/{streamServerZoneName}': + parameters: + - name: streamServerZoneName + in: path + description: The name of a stream server zone. + type: string + required: true + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of a stream server zone + description: Returns status of a particular stream server zone. + operationId: getStreamServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZone' + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Server Zones + - Method DELETE + summary: Reset statistics for a stream server zone + description: Resets statistics of accepted and discarded connections, sessions, + received and sent bytes in a particular stream server zone. + operationId: deleteStreamServerZoneStat + responses: + '204': + description: Success + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/limit_conns/: + get: + tags: + - Stream Limit Conns + - Method GET + summary: Return status of all stream limit_conn zones + description: Returns status information for each stream + [limit_conn zone](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone). + operationId: getStreamLimitConnZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of limit_conn zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamLimitConnZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/stream/limit_conns/{streamLimitConnZoneName}': + parameters: + - name: streamLimitConnZoneName + in: path + description: The name of a + [limit_conn zone](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone). 
+ type: string
+ required: true
+ get:
+ tags:
+ - Stream Limit Conns
+ - Method GET
+ summary: Return status of a stream limit_conn zone
+ description: Returns status of a particular stream
+ [limit_conn zone](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone).
+ operationId: getStreamLimitConnZone
+ produces:
+ - application/json
+ parameters:
+ - name: fields
+ in: query
+ type: string
+ description: Limits which fields of the
+ [limit_conn zone](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone)
+ will be output.
+ responses:
+ '200':
+ description: Success
+ schema:
+ $ref: '#/definitions/NginxStreamLimitConnZone'
+ '404':
+ description: |
+ limit_conn not found (*LimitConnNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - Stream Limit Conns
+ - Method DELETE
+ summary: Reset statistics for a stream limit_conn zone
+ description: Resets the connection limiting statistics.
+ operationId: deleteStreamLimitConnZoneStat
+ responses:
+ '204':
+ description: Success
+ '404':
+ description: |
+ limit_conn not found (*LimitConnNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ /stream/upstreams/:
+ get:
+ tags:
+ - Stream Upstreams
+ - Method GET
+ summary: Return status of all stream upstream server groups
+ description: Returns status of each stream upstream server group
+ and its servers.
+ operationId: getStreamUpstreams
+ produces:
+ - application/json
+ parameters:
+ - name: fields
+ in: query
+ type: string
+ description: Limits which fields of upstream server groups will be output.
+ If the “fields” value is empty,
+ only names of upstreams will be output.
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/': + parameters: + - name: streamUpstreamName + in: path + description: The name of a stream upstream server group. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return status of a stream upstream server group + description: Returns status of a particular stream upstream server group + and its servers. + operationId: getStreamUpstream + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Upstreams + - Method DELETE + summary: Reset statistics of a stream upstream server group + description: Resets the statistics for each upstream server + in an upstream server group. 
+ operationId: deleteStreamUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/servers/': + parameters: + - name: streamUpstreamName + in: path + description: The name of an upstream server group. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of all servers in a stream upstream server group + description: Returns configuration of each server + in a particular stream upstream server group. + operationId: getStreamUpstreamServers + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - Stream Upstreams + - Method POST + summary: Add a server to a stream upstream server group + description: Adds a new server to a stream upstream server group. + Server parameters are specified in the JSON format. + operationId: postStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: postStreamUpstreamServer + description: Address of a new server and other optional parameters + in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxStreamUpstreamConfServer'
+ responses:
+ '201':
+ description: Created
+ schema:
+ $ref: '#/definitions/NginxStreamUpstreamConfServer'
+ '400':
+ description: |
+ Upstream is static (*UpstreamStatic*),
+ invalid “**parameter**” value (*UpstreamConfFormatError*),
+ missing “*server*” argument (*UpstreamConfFormatError*),
+ unknown parameter “**name**” (*UpstreamConfFormatError*),
+ nested object or list (*UpstreamConfFormatError*),
+ “*error*” while parsing (*UpstreamBadAddress*),
+ no port in server “*host*” (*UpstreamBadAddress*),
+ service upstream “*host*” may not have port (*UpstreamBadAddress*),
+ service upstream “*host*” requires domain name (*UpstreamBadAddress*),
+ invalid “*weight*” (*UpstreamBadWeight*),
+ invalid “*max_conns*” (*UpstreamBadMaxConns*),
+ invalid “*max_fails*” (*UpstreamBadMaxFails*),
+ invalid “*fail_timeout*” (*UpstreamBadFailTimeout*),
+ invalid “*slow_start*” (*UpstreamBadSlowStart*),
+ “*service*” is empty (*UpstreamBadService*),
+ no resolver defined to resolve (*UpstreamConfNoResolver*),
+ upstream “**name**” has no backup (*UpstreamNoBackup*),
+ upstream “**name**” memory exhausted (*UpstreamOutOfMemory*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Unknown version (*UnknownVersion*),
+ upstream not found (*UpstreamNotFound*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '409':
+ description: Entry exists (*EntryExists*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '/stream/upstreams/{streamUpstreamName}/servers/{streamUpstreamServerId}':
+ parameters:
+ - name: streamUpstreamName
+ in: path
+ description: The name of the upstream server group.
+ required: true + type: string + - name: streamUpstreamServerId + in: path + description: The ID of the server. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of a server in a stream upstream server group + description: Returns configuration of a particular server + in the stream upstream server group. + operationId: getStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - Stream Upstreams + - Method PATCH + summary: Modify a server in a stream upstream server group + description: Modifies settings of a particular server + in a stream upstream server group. + Server parameters are specified in the JSON format. + operationId: patchStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: patchStreamUpstreamServer + description: Server parameters, specified in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxStreamUpstreamConfServer'
+ responses:
+ '200':
+ description: Success
+ schema:
+ $ref: '#/definitions/NginxStreamUpstreamConfServer'
+ '400':
+ description: |
+ Upstream is static (*UpstreamStatic*),
+ invalid “**parameter**” value (*UpstreamConfFormatError*),
+ unknown parameter “**name**” (*UpstreamConfFormatError*),
+ nested object or list (*UpstreamConfFormatError*),
+ “*error*” while parsing (*UpstreamBadAddress*),
+ invalid “*server*” argument (*UpstreamBadAddress*),
+ no port in server “*host*” (*UpstreamBadAddress*),
+ invalid server ID (*UpstreamBadServerId*),
+ invalid “*weight*” (*UpstreamBadWeight*),
+ invalid “*max_conns*” (*UpstreamBadMaxConns*),
+ invalid “*max_fails*” (*UpstreamBadMaxFails*),
+ invalid “*fail_timeout*” (*UpstreamBadFailTimeout*),
+ invalid “*slow_start*” (*UpstreamBadSlowStart*),
+ reading request body failed (*BodyReadError*),
+ “*service*” is empty (*UpstreamBadService*),
+ server “**ID**” address is immutable (*UpstreamServerImmutable*),
+ server “**ID**” weight is immutable (*UpstreamServerWeightImmutable*),
+ upstream “*name*” memory exhausted (*UpstreamOutOfMemory*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Server with ID “**id**” does not exist (*UpstreamServerNotFound*),
+ unknown version (*UnknownVersion*),
+ upstream not found (*UpstreamNotFound*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - Stream Upstreams
+ - Method DELETE
+ summary: Remove a server from a stream upstream server group
+ description: Removes a server from a stream server group.
+ operationId: deleteStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Server with ID “**id**” does not exist (*UpstreamServerNotFound*), + unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/keyvals/: + get: + tags: + - Stream Keyvals + - Method GET + summary: Return key-value pairs from all stream keyval zones + description: Returns key-value pairs for each stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + operationId: getStreamKeyvalZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: If the “fields” value is empty, + then only stream keyval zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamKeyvalZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/stream/keyvals/{streamKeyvalZoneName}': + parameters: + - name: streamKeyvalZoneName + in: path + description: The name of a stream keyval shared memory zone. + required: true + type: string + get: + tags: + - Stream Keyvals + - Method GET + summary: Return key-value pairs from a stream keyval zone + description: Returns key-value pairs stored in a particular stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). 
+ operationId: getStreamKeyvalZoneKeysValues + produces: + - application/json + parameters: + - name: key + in: query + type: string + description: Get a particular key-value pair from the stream keyval zone. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamKeyvalZone' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - Stream Keyvals + - Method POST + summary: Add a key-value pair to the stream keyval zone + description: Adds a new key-value pair to the stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + Several key-value pairs can be entered + if the stream keyval shared memory zone is empty. + operationId: postStreamKeyvalZoneData + produces: + - application/json + parameters: + - in: body + name: Key-value + description: A key-value pair is specified in the JSON format. + Several key-value pairs can be entered + if the stream keyval shared memory zone is empty. + Expiration time in milliseconds can be specified for a key-value pair + with the *expire* parameter + which overrides the [*timeout*](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_timeout) + parameter of the + keyval_zone + directive. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxStreamKeyvalZonePostPatch'
+ responses:
+ '201':
+ description: Created
+ '400':
+ description: |
+ Invalid JSON (*KeyvalFormatError*),
+ invalid key format (*KeyvalFormatError*),
+ key required (*KeyvalFormatError*),
+ keyval timeout is not enabled (*KeyvalFormatError*),
+ only one key can be added (*KeyvalFormatError*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '409':
+ description: |
+ Entry exists (*EntryExists*),
+ key already exists (*KeyvalKeyExists*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '413':
+ description: Request Entity Too Large
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ patch:
+ tags:
+ - Stream Keyvals
+ - Method PATCH
+ summary: Modify a key-value or delete a key
+ description: Changes the value of the selected key in the key-value pair,
+ deletes a key by setting the key value to null,
+ changes expiration time of a key-value pair.
+ If
+ synchronization
+ of keyval zones in a cluster is enabled,
+ deletes a key only on a target cluster node.
+ Expiration time is specified in milliseconds
+ with the *expire* parameter
+ which overrides the [*timeout*](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_timeout)
+ parameter of the
+ keyval_zone
+ directive.
+ operationId: patchStreamKeyvalZoneKeyValue
+ produces:
+ - application/json
+ parameters:
+ - in: body
+ name: streamKeyvalZoneKeyValue
+ description: A new value for the key is specified in the JSON format.
+ required: true
+ schema:
+ $ref: '#/definitions/NginxStreamKeyvalZonePostPatch'
+ responses:
+ '204':
+ description: Success
+ '400':
+ description: |
+ Invalid JSON (*KeyvalFormatError*),
+ key required (*KeyvalFormatError*),
+ keyval timeout is not enabled (*KeyvalFormatError*),
+ only one key can be updated (*KeyvalFormatError*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ keyval key not found (*KeyvalKeyNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '413':
+ description: Request Entity Too Large
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - Stream Keyvals
+ - Method DELETE
+ summary: Empty the stream keyval zone
+ description: Deletes all key-value pairs from the stream keyval shared memory
+ [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone).
+ If
+ synchronization
+ of keyval zones in a cluster is enabled,
+ empties the keyval zone only on a target cluster node.
+ operationId: deleteStreamKeyvalZoneData
+ responses:
+ '204':
+ description: Success
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ /stream/zone_sync/:
+ get:
+ tags:
+ - Stream Zone Sync
+ - Method GET
+ summary: Return sync status of a node
+ description: Returns synchronization status of a cluster node.
+ operationId: getStreamZoneSync + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamZoneSync' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /resolvers/: + get: + tags: + - Resolvers + - Method GET + summary: Return status for all resolver zones + description: Returns status information for each + [resolver zone](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver_status_zone). + operationId: getResolverZones + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxResolverZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of resolvers statistics will be output. + '/resolvers/{resolverZoneName}': + parameters: + - name: resolverZoneName + in: path + description: The name of a resolver zone. + required: true + type: string + get: + tags: + - Resolvers + - Method GET + summary: Return statistics of a resolver zone + description: Returns statistics stored in a particular resolver + [zone](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver_status_zone). + operationId: getResolverZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the resolver zone will be output + (requests, responses, or both). + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxResolverZone' + '404': + description: | + Resolver zone not found (*ResolverZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Resolvers + - Method DELETE + summary: Reset statistics for a resolver zone. + description: Resets statistics in a particular resolver zone. 
+ operationId: deleteResolverZoneStat + responses: + '204': + description: Success + '404': + description: | + Resolver zone not found (*ResolverZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /ssl: + get: + tags: + - SSL + - Method GET + summary: Return SSL statistics + description: Returns SSL statistics. + operationId: getSsl + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSSLObject' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of SSL statistics will be output. + delete: + tags: + - SSL + - Method DELETE + summary: Reset SSL statistics + description: Resets counters of SSL handshakes and session reuses. + operationId: deleteSslStat + responses: + '204': + description: Success + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' +### +###DEFINITIONS +### +definitions: + ArrayOfStrings: + title: Array + description: | + An array of strings. + type: array + items: + type: string + NginxObject: + title: nginx + description: | + General information about nginx: + type: object + properties: + version: + type: string + description: Version of nginx. + build: + type: string + description: Name of nginx build. + address: + type: string + description: The address of the server that accepted status request. + generation: + type: integer + description: The total number of configuration + reloads. 
+ load_timestamp: + type: string + format: date-time + description: Time of the last reload of configuration, + in the ISO 8601 format with millisecond resolution. + timestamp: + type: string + format: date-time + description: Current time + in the ISO 8601 format with millisecond resolution. + pid: + type: integer + description: The ID of the worker process that handled status request. + ppid: + type: integer + description: The ID of the master process that started the + worker process. + example: + nginx: + version: 1.17.3 + build: nginx-plus-r19 + address: 206.251.255.64 + generation: 6 + load_timestamp: 2019-10-01T11:15:44.467Z + timestamp: 2019-10-01T09:26:07.305Z + pid: 32212 + ppid: 32210 + NginxProcesses: + title: Processes + type: object + properties: + respawned: + type: integer + description: The total number of abnormally terminated + and respawned child processes. + example: + respawned: 0 + NginxConnections: + title: Connections + description: | + The number of accepted, dropped, active, and idle connections. + type: object + properties: + accepted: + type: integer + description: The total number of accepted client connections. + dropped: + type: integer + description: The total number of dropped client connections. + active: + type: integer + description: The current number of active client connections. + idle: + type: integer + description: The current number of idle client connections. + example: + accepted: 4968119 + dropped: 0 + active: 5 + idle: 117 + NginxSSLObject: + title: SSL + type: object + properties: + handshakes: + type: integer + description: The total number of successful SSL handshakes. + handshakes_failed: + type: integer + description: The total number of failed SSL handshakes. + session_reuses: + type: integer + description: The total number of session reuses during SSL handshake. 
+ example: + handshakes: 79572 + handshakes_failed: 21025 + session_reuses: 15762 + NginxSlabZoneMap: + title: Slab Zones + description: | + Status zones that use slab allocator. + type: object + additionalProperties: + $ref: '#/definitions/NginxSlabZone' + example: + http_cache: + pages: + used: 2 + free: 2452 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 1 + free: 126 + reqs: 1 + fails: 0 + 64: + used: 2 + free: 62 + reqs: 2 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + dns-backends: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZone: + title: Shared memory zone with slab allocator + description: | + type: object + properties: + pages: + type: object + description: The number of free and used memory pages. + properties: + used: + type: integer + description: The current number of used memory pages. + free: + type: integer + description: The current number of free memory pages. + slots: + type: object + title: Memory Slots + description: Status data for memory slots (8, 16, 32, 64, 128, etc.) + additionalProperties: + $ref: '#/definitions/NginxSlabZoneSlot' + example: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZoneSlot: + title: Memory Slot + type: object + properties: + used: + type: integer + description: The current number of used memory slots. 
+ free: + type: integer + description: The current number of free memory slots. + reqs: + type: integer + description: The total number of attempts + to allocate memory of specified size. + fails: + type: integer + description: The number of unsuccessful attempts + to allocate memory of specified size. + NginxHTTPRequests: + title: HTTP Requests + type: object + properties: + total: + type: integer + description: The total number of client requests. + current: + type: integer + description: The current number of client requests. + example: + total: 10624511 + current: 4 + NginxHTTPServerZonesMap: + title: HTTP Server Zones + description: | + Status data for all HTTP + status zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPServerZone' + example: + site1: + processing: 2 + requests: 736395 + responses: + 1xx: 0 + 2xx: 727290 + 3xx: 4614 + 4xx: 934 + 5xx: 1535 + total: 734373 + discarded: 2020 + received: 180157219 + sent: 20183175459 + site2: + processing: 1 + requests: 185307 + responses: + 1xx: 0 + 2xx: 112674 + 3xx: 45383 + 4xx: 2504 + 5xx: 4419 + total: 164980 + discarded: 20326 + received: 51575327 + sent: 2983241510 + NginxHTTPServerZone: + title: HTTP Server Zone + type: object + properties: + processing: + type: integer + description: The number of client requests + that are currently being processed. + requests: + type: integer + description: The total number of client requests received from clients. + responses: + description: The total number of responses sent to clients and the + number of responses with status codes + “1xx”, “2xx”, “3xx”, + “4xx”, and “5xx”. + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. 
+ readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. + readOnly: true + total: + type: integer + description: The total number of responses sent to clients. + readOnly: true + discarded: + type: integer + description: The total number of + requests completed without sending a response. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + processing: 1 + requests: 706690 + responses: + 1xx: 0 + 2xx: 699482 + 3xx: 4522 + 4xx: 907 + 5xx: 266 + total: 705177 + discarded: 1513 + received: 172711587 + sent: 19415530115 + NginxHTTPLocationZonesMap: + title: HTTP Location Zones + description: | + Status data for all HTTP + location zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPLocationZone' + example: + site1: + requests: 736395 + responses: + 1xx: 0 + 2xx: 727290 + 3xx: 4614 + 4xx: 934 + 5xx: 1535 + total: 734373 + discarded: 2020 + received: 180157219 + sent: 20183175459 + site2: + requests: 185307 + responses: + 1xx: 0 + 2xx: 112674 + 3xx: 45383 + 4xx: 2504 + 5xx: 4419 + total: 164980 + discarded: 20326 + received: 51575327 + sent: 2983241510 + NginxHTTPLocationZone: + title: HTTP Location Zone + type: object + properties: + requests: + type: integer + description: The total number of client requests received from clients. + responses: + description: The total number of responses sent to clients and the + number of responses with status codes + “1xx”, “2xx”, “3xx”, + “4xx”, and “5xx”. + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. 
+ readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. + readOnly: true + total: + type: integer + description: The total number of responses sent to clients. + readOnly: true + discarded: + type: integer + description: The total number of + requests completed without sending a response. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + requests: 706690 + responses: + 1xx: 0 + 2xx: 699482 + 3xx: 4522 + 4xx: 907 + 5xx: 266 + total: 705177 + discarded: 1513 + received: 172711587 + sent: 19415530115 + NginxHTTPCachesMap: + title: HTTP Caches + description: | + Status information of all HTTP caches configured by + proxy_cache_path + and other “*_cache_path” directives. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPCache' + example: + http-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + frontend-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + NginxHTTPCache: + title: HTTP Cache + type: object + properties: + size: + type: integer + description: The current size of the cache. + max_size: + type: integer + description: The limit on the maximum size of the cache + specified in the configuration. + cold: + type: boolean + description: A boolean value indicating whether the “cache loader” process + is still loading data from disk into the cache. + hit: + type: object + properties: + responses: + type: integer + description: The total number of + valid + responses read from the cache. + bytes: + type: integer + description: The total number of bytes read from the cache. + stale: + type: object + properties: + responses: + type: integer + description: The total number of expired responses read from the cache (see + proxy_cache_use_stale + and other “*_cache_use_stale” directives). + bytes: + type: integer + description: The total number of bytes read from the cache. 
+ updating:
+ type: object
+ properties:
+ responses:
+ type: integer
+ description: The total number of expired responses read from the cache
+ while responses were being updated (see
+ proxy_cache_use_stale
+ and other “*_cache_use_stale” directives).
+ bytes:
+ type: integer
+ description: The total number of bytes read from the cache.
+ revalidated:
+ type: object
+ properties:
+ responses:
+ type: integer
+ description: The total number of expired and revalidated responses
+ read from the cache (see
+ proxy_cache_revalidate
+ and other “*_cache_revalidate” directives).
+ bytes:
+ type: integer
+ description: The total number of bytes read from the cache.
+ miss:
+ type: object
+ properties:
+ responses:
+ type: integer
+ description: The total number of responses not found in the cache.
+ bytes:
+ type: integer
+ description: The total number of bytes read from the proxied server.
+ responses_written:
+ type: integer
+ description: The total number of responses written to the cache.
+ bytes_written:
+ type: integer
+ description: The total number of bytes written to the cache.
+ expired:
+ type: object
+ properties:
+ responses:
+ type: integer
+ description: The total number of expired responses not taken from the cache.
+ bytes:
+ type: integer
+ description: The total number of bytes read from the proxied server.
+ responses_written:
+ type: integer
+ description: The total number of responses written to the cache.
+ bytes_written:
+ type: integer
+ description: The total number of bytes written to the cache.
+ bypass:
+ type: object
+ properties:
+ responses:
+ type: integer
+ description: The total number of responses not looked up in the cache due to the
+ proxy_cache_bypass
+ and other “*_cache_bypass” directives.
+ bytes:
+ type: integer
+ description: The total number of bytes read from the proxied server.
+ responses_written:
+ type: integer
+ description: The total number of responses written to the cache.
+ bytes_written: + type: integer + description: The total number of bytes written to the cache. + example: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + NginxHTTPLimitConnZonesMap: + title: HTTP limit_conns + description: | + Status data for all HTTP + limit_conn zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPLimitConnZone' + NginxHTTPLimitConnZone: + title: HTTP Connections Limiting + type: object + properties: + passed: + type: integer + description: The total number of connections that were neither limited + nor accounted as limited. + rejected: + type: integer + description: The total number of connections that were rejected. + rejected_dry_run: + type: integer + description: The total number of connections accounted as rejected in the + dry run + mode. + example: + passed: 15 + rejected: 0 + rejected_dry_run: 2 + NginxHTTPLimitReqZonesMap: + title: HTTP limit_reqs + description: | + Status data for all HTTP + limit_req zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPLimitReqZone' + NginxHTTPLimitReqZone: + title: HTTP Requests Rate Limiting + type: object + properties: + passed: + type: integer + description: The total number of requests that were neither limited + nor accounted as limited. + delayed: + type: integer + description: The total number of requests that were delayed. + rejected: + type: integer + description: The total number of requests that were rejected. 
+ delayed_dry_run: + type: integer + description: The total number of requests accounted as delayed in the + dry run + mode. + rejected_dry_run: + type: integer + description: The total number of requests accounted as rejected in the + dry run + mode. + example: + passed: 15 + delayed: 4 + rejected: 0 + delayed_dry_run: 1 + rejected_dry_run: 2 + NginxHTTPUpstreamMap: + title: HTTP Upstreams + description: | + Status information of all HTTP + dynamically configurable + groups. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPUpstream' + example: + trac-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + keepalive: 0 + zombies: 0 + zone: trac-backend + hg-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 
2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + keepalive: 0 + zombies: 0 + zone: hg-backend + NginxHTTPUpstream: + title: HTTP Upstream + type: object + properties: + peers: + $ref: '#/definitions/NginxHTTPUpstreamPeerMap' + keepalive: + type: integer + description: The current number of idle + keepalive + connections. + zombies: + type: integer + description: The current number of servers removed + from the group but still processing active client requests. + zone: + type: string + description: The name of the shared memory + zone + that keeps the group’s configuration and run-time state. + queue: + type: object + description: > + For the requests + queue, + the following data are provided: + properties: + size: + type: integer + description: The current number of requests in the queue. + max_size: + type: integer + description: The maximum number of requests that can be in the queue + at the same time. + overflows: + type: integer + description: The total number of requests rejected due to the queue overflow. 
+ example: + upstream_backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 20 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 20 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + keepalive: 0 + zombies: 0 + zone: upstream_backend + NginxHTTPUpstreamPeerMap: + title: HTTP Upstream Servers + description: | + An array of HTTP + upstream servers. + type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamPeer' + NginxHTTPUpstreamPeer: + title: HTTP Upstream Server + type: object + properties: + id: + type: integer + description: The ID of the server. + readOnly: true + server: + type: string + description: An address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. + name: + type: string + description: The name of the server specified in the + server + directive. + readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. 
+ state: + type: string + enum: + - up + - draining + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “draining”, “down”, + “unavail”, “checking”, + and “unhealthy”. + active: + type: integer + description: The current number of active connections. + readOnly: true + max_conns: + type: integer + description: The + max_conns + limit for the server. + requests: + type: integer + description: The total number of client requests forwarded to this server. + readOnly: true + responses: + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. + readOnly: true + total: + type: integer + description: The total number of responses obtained from this server. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. + readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client requests + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. + readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. 
+ fails: + type: integer + description: The number of failed health checks. + unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). + last_passed: + type: boolean + description: Boolean indicating if the last health check request was successful + and passed + tests. + downtime: + type: integer + readOnly: true + description: Total time the server was in the “unavail”, + “checking”, and “unhealthy” states. + downstart: + type: string + format: date-time + readOnly: true + description: The time when the server became + “unavail”, “checking”, + or “unhealthy”, + in the ISO 8601 format with millisecond resolution. + selected: + type: string + format: date-time + readOnly: true + description: The time when the server was last selected to process a request, + in the ISO 8601 format with millisecond resolution. + header_time: + type: integer + readOnly: true + description: The average time to get the + response header + from the server. + response_time: + type: integer + readOnly: true + description: The average time to get the + full response + from the server. + NginxHTTPUpstreamConfServerMap: + title: HTTP Upstream Servers + description: An array of HTTP upstream servers for dynamic configuration. + type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:8088 + weight: 1 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: false + down: false + - id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPUpstreamConfServer: + title: HTTP Upstream Server + description: | + Dynamically configurable parameters of an HTTP upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the HTTP upstream server. + The ID is assigned automatically and cannot be changed. 
+ readOnly: true + server: + type: string + description: Same as the + address + parameter of the HTTP upstream server. + When adding a server, it is possible to specify it as a domain name. + In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “http” block. + See also the + resolve + parameter of the HTTP upstream server. + service: + type: string + description: Same as the + service + parameter of the HTTP upstream server. + This parameter cannot be changed. + readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the HTTP upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the HTTP upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the HTTP upstream server. + fail_timeout: + type: string + description: Same as the + fail_timeout + parameter of the HTTP upstream server. + slow_start: + type: string + description: Same as the + slow_start + parameter of the HTTP upstream server. + route: + type: string + description: Same as the + route + parameter of the HTTP upstream server. + backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the HTTP upstream server. + drain: + type: boolean + description: Same as the + drain + parameter of the HTTP upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. 
+ readOnly: true + example: + id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPKeyvalZonesMap: + title: HTTP Keyval Shared Memory Zones + description: | + Contents of all HTTP keyval shared memory zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPKeyvalZone' + example: + keyval_zone: + key1: value1 + key2: value2 + key3: value3 + one: + arg1: value1 + arg2: value2 + arg3: value3 + NginxHTTPKeyvalZone: + title: HTTP Keyval Shared Memory Zone + description: | + Contents of an HTTP keyval shared memory zone + when using the GET method. + type: object + example: + key1: value1 + key2: value2 + key3: value3 + NginxHTTPKeyvalZonePostPatch: + title: HTTP Keyval Shared Memory Zone + description: | + Contents of an HTTP keyval shared memory zone + when using the POST or PATCH methods. + type: object + example: + key1: value1 + key2: value2 + key3: + value: value3 + expire: 30000 + NginxStreamServerZonesMap: + title: Stream Server Zones + description: | + Status information for all stream + status zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxStreamServerZone' + example: + mysql-frontend: + processing: 2 + connections: 270925 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 270925 + discarded: 0 + received: 28988975 + sent: 3879346317 + dns: + processing: 1 + connections: 155569 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 155569 + discarded: 0 + received: 4200363 + sent: 20489184 + NginxStreamServerZone: + title: Stream Server Zone + type: object + properties: + processing: + type: integer + description: The number of client connections + that are currently being processed. + connections: + type: integer + description: The total number of connections accepted from clients. 
+ sessions: + type: object + description: The total number of completed sessions, + and the number of sessions completed with status codes + “2xx”, “4xx”, or “5xx”. + properties: + 2xx: + type: integer + description: The total number of sessions completed with + status codes + “2xx”. + 4xx: + type: integer + description: The total number of sessions completed with + status codes + “4xx”. + 5xx: + type: integer + description: The total number of sessions completed with + status codes + “5xx”. + total: + type: integer + description: The total number of completed client sessions. + discarded: + type: integer + description: The total number of + connections completed without creating a session. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + dns: + processing: 1 + connections: 155569 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 155569 + discarded: 0 + received: 4200363 + sent: 20489184 + NginxStreamLimitConnZonesMap: + title: Stream limit_conns + description: | + Status data for all stream + limit_conn zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxStreamLimitConnZone' + NginxStreamLimitConnZone: + title: Stream Connections Limiting + type: object + properties: + passed: + type: integer + description: The total number of connections that were neither limited + nor accounted as limited. + rejected: + type: integer + description: The total number of connections that were rejected. + rejected_dry_run: + type: integer + description: The total number of connections accounted as rejected in the + dry run + mode. + example: + passed: 15 + rejected: 0 + rejected_dry_run: 2 + NginxStreamUpstreamMap: + title: Stream Upstreams + description: Status information of stream upstream server groups. 
+    type: object
+    additionalProperties:
+      $ref: '#/definitions/NginxStreamUpstream'
+    example:
+      mysql_backends:
+        peers:
+          - id: 0
+            server: 10.0.0.1:12345
+            name: 10.0.0.1:12345
+            backup: false
+            weight: 5
+            state: up
+            active: 0
+            max_conns: 30
+            connections: 1231
+            sent: 251946292
+            received: 19222475454
+            fails: 0
+            unavail: 0
+            health_checks:
+              checks: 26214
+              fails: 0
+              unhealthy: 0
+              last_passed: true
+            downtime: 0
+            downstart: 2019-10-01T11:09:21.602Z
+            selected: 2019-10-01T15:01:25Z
+          - id: 1
+            server: 10.0.0.1:12346
+            name: 10.0.0.1:12346
+            backup: true
+            weight: 1
+            state: unhealthy
+            active: 0
+            max_conns: 30
+            connections: 0
+            sent: 0
+            received: 0
+            fails: 0
+            unavail: 0
+            health_checks:
+              checks: 26284
+              fails: 26284
+              unhealthy: 1
+              last_passed: false
+            downtime: 262925617
+            downstart: 2019-10-01T11:09:21.602Z
+            selected: 2019-10-01T15:01:25Z
+        zombies: 0
+        zone: mysql_backends
+      dns:
+        peers:
+          - id: 0
+            server: 10.0.0.1:12347
+            name: 10.0.0.1:12347
+            backup: false
+            weight: 5
+            state: up
+            active: 0
+            max_conns: 30
+            connections: 667231
+            sent: 251946292
+            received: 19222475454
+            fails: 0
+            unavail: 0
+            health_checks:
+              checks: 26214
+              fails: 0
+              unhealthy: 0
+              last_passed: true
+            downtime: 0
+            downstart: 2019-10-01T11:09:21.602Z
+            selected: 2019-10-01T15:01:25Z
+          - id: 1
+            server: 10.0.0.1:12348
+            name: 10.0.0.1:12348
+            backup: true
+            weight: 1
+            state: unhealthy
+            active: 0
+            connections: 0
+            max_conns: 30
+            sent: 0
+            received: 0
+            fails: 0
+            unavail: 0
+            health_checks:
+              checks: 26284
+              fails: 26284
+              unhealthy: 1
+              last_passed: false
+            downtime: 262925617
+            downstart: 2019-10-01T11:09:21.602Z
+            selected: 2019-10-01T15:01:25Z
+        zombies: 0
+        zone: dns
+  NginxStreamUpstream:
+    title: Stream Upstream
+    type: object
+    properties:
+      peers:
+        $ref: '#/definitions/NginxStreamUpstreamPeerMap'
+      zombies:
+        type: integer
+        description: The current number of servers removed from the group
+          but still processing active client connections.
+ zone: + type: string + description: The name of the shared memory + zone + that keeps the group’s configuration and run-time state. + example: + dns: + peers: + - id: 0 + server: 10.0.0.1:12347 + name: 10.0.0.1:12347 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 50 + connections: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + - id: 1 + server: 10.0.0.1:12348 + name: 10.0.0.1:12348 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 50 + connections: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + zombies: 0 + zone: dns + NginxStreamUpstreamPeerMap: + title: Stream Upstream Servers + description: Array of stream upstream servers. + type: array + items: + $ref: '#/definitions/NginxStreamUpstreamPeer' + NginxStreamUpstreamPeer: + title: Stream Upstream Server + type: object + properties: + id: + type: integer + description: The ID of the server. + readOnly: true + server: + type: string + description: An + address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. + name: + type: string + format: hostname + description: The name of the server specified in the + server + directive. + readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. + state: + type: string + readOnly: true + enum: + - up + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “down”, “unavail”, + “checking”, or “unhealthy”. 
+ active: + type: integer + description: The current number of connections. + readOnly: true + max_conns: + type: integer + description: The + max_conns + limit for the server. + connections: + type: integer + description: The total number of client connections forwarded to this server. + readOnly: true + connect_time: + type: integer + description: The average time to connect to the upstream server. + readOnly: true + first_byte_time: + type: integer + description: The average time to receive the first byte of data. + readOnly: true + response_time: + type: integer + description: The average time to receive the last byte of data. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. + readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client connections + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. + readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. + readOnly: true + fails: + type: integer + description: The number of failed health checks. + readOnly: true + unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). + readOnly: true + last_passed: + type: boolean + description: Boolean indicating whether the last health check request + was successful and passed + tests. + readOnly: true + downtime: + type: integer + description: Total time the server was in the + “unavail”, “checking”, + and “unhealthy” states. 
+ readOnly: true + downstart: + type: string + format: date-time + description: The time when the server became + “unavail”, “checking”, + or “unhealthy”, + in the ISO 8601 format with millisecond resolution. + readOnly: true + selected: + type: string + format: date-time + description: The time when the server was last selected + to process a connection, + in the ISO 8601 format with millisecond resolution. + readOnly: true + NginxStreamUpstreamConfServerMap: + title: Stream Upstream Servers + description: | + An array of stream upstream servers for dynamic configuration. + type: array + items: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + - id: 1 + server: 10.0.0.1:12349 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxStreamUpstreamConfServer: + title: Stream Upstream Server + description: | + Dynamically configurable parameters of a stream upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the stream upstream server. + The ID is assigned automatically and cannot be changed. + readOnly: true + server: + type: string + description: Same as the + address + parameter of the stream upstream server. + When adding a server, it is possible to specify it as a domain name. + In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “stream” block. + See also the + resolve + parameter of the stream upstream server. + service: + type: string + description: Same as the + service + parameter of the stream upstream server. + This parameter cannot be changed. 
+ readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the stream upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the stream upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the stream upstream server. + fail_timeout: + type: string + description: Same as the + fail_timeout + parameter of the stream upstream server. + slow_start: + type: string + description: Same as the + slow_start + parameter of the stream upstream server. + backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the stream upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. + readOnly: true + example: + id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxStreamKeyvalZonesMap: + title: Stream Keyval Shared Memory Zones + description: | + Contents of all stream keyval shared memory zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxStreamKeyvalZone' + example: + keyval_zone: + key1: value1 + key2: value2 + key3: value3 + one: + arg1: value1 + arg2: value2 + arg3: value3 + NginxStreamKeyvalZone: + title: Stream Keyval Shared Memory Zone + description: | + Contents of a stream keyval shared memory zone when using the GET method. 
+ type: object + example: + key1: value1 + key2: value2 + key3: value3 + NginxStreamKeyvalZonePostPatch: + title: Stream Keyval Shared Memory Zone + description: | + Contents of a stream keyval shared memory zone + when using the POST or PATCH methods. + type: object + example: + key1: value1 + key2: value2 + key3: + value: value3 + expire: 30000 + NginxStreamZoneSync: + title: Stream Zone Sync Node + type: object + properties: + zones: + type: object + title: Zone Sync Zones + description: Synchronization information per each shared memory zone. + additionalProperties: + $ref: '#/definitions/NginxStreamZoneSyncZone' + status: + type: object + description: Synchronization information per node in a cluster. + properties: + bytes_in: + type: integer + description: The number of bytes received by this node. + msgs_in: + type: integer + description: The number of messages received by this node. + msgs_out: + type: integer + description: The number of messages sent by this node. + bytes_out: + type: integer + description: The number of bytes sent by this node. + nodes_online: + type: integer + description: The number of peers this node is connected to. + example: + zones: + zone1: + records_pending: 2061 + records_total: 260575 + zone2: + records_pending: 0 + records_total: 14749 + status: + bytes_in: 1364923761 + msgs_in: 337236 + msgs_out: 346717 + bytes_out: 1402765472 + nodes_online: 15 + NginxStreamZoneSyncZone: + title: Sync Zone + description: Synchronization status of a shared memory zone. + type: object + properties: + records_pending: + type: integer + description: The number of records that need to be sent to the cluster. + records_total: + type: integer + description: The total number of records stored in the shared memory zone. + NginxResolverZonesMap: + title: Resolver Zones + description: | + Status data for all + resolver zones. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxResolverZone' + example: + resolver_zone1: + requests: + name: 25460 + srv: 130 + addr: 2580 + responses: + noerror: 26499 + formerr: 0 + servfail: 3 + nxdomain: 0 + notimp: 0 + refused: 0 + timedout: 243 + unknown: 478 + resolver_zone2: + requests: + name: 325460 + srv: 1130 + addr: 12580 + responses: + noerror: 226499 + formerr: 0 + servfail: 283 + nxdomain: 0 + notimp: 0 + refused: 0 + timedout: 743 + unknown: 1478 + NginxResolverZone: + title: Resolver Zone + description: | + Statistics of DNS requests and responses per particular + resolver zone. + type: object + properties: + requests: + type: object + readOnly: true + properties: + name: + type: integer + description: The total number of requests + to resolve names to addresses. + readOnly: true + srv: + type: integer + description: The total number of requests + to resolve SRV records. + readOnly: true + addr: + type: integer + description: The total number of requests + to resolve addresses to names. + readOnly: true + responses: + type: object + readOnly: true + properties: + noerror: + type: integer + description: The total number of successful responses. + readOnly: true + formerr: + type: integer + description: The total number of + FORMERR (Format error) responses. + readOnly: true + servfail: + type: integer + description: The total number of + SERVFAIL (Server failure) responses. + readOnly: true + nxdomain: + type: integer + description: The total number of + NXDOMAIN (Host not found) responses. + readOnly: true + notimp: + type: integer + description: The total number of + NOTIMP (Unimplemented) responses. + readOnly: true + refused: + type: integer + description: The total number of + REFUSED (Operation refused) responses. + readOnly: true + timedout: + type: integer + description: The total number of timed out requests. 
+ readOnly: true + unknown: + type: integer + description: The total number of requests + completed with an unknown error. + readOnly: true + example: + resolver_zone1: + requests: + name: 25460 + srv: 130 + addr: 2580 + responses: + noerror: 26499 + formerr: 0 + servfail: 3 + nxdomain: 0 + notimp: 0 + refused: 0 + timedout: 243 + unknown: 478 + NginxError: + title: Error + description: | + nginx error object. + type: object + properties: + error: + type: object + properties: + status: + type: integer + description: HTTP error code. + text: + type: string + description: Error description. + code: + type: string + description: Internal nginx error code. + request_id: + type: string + description: The ID of the request, equals the value of the + $request_id + variable. + href: + type: string + description: Link to reference documentation. diff --git a/content/nginx/admin-guide/yaml/v7/nginx_api.yaml b/content/nginx/admin-guide/yaml/v7/nginx_api.yaml new file mode 100644 index 000000000..86e289362 --- /dev/null +++ b/content/nginx/admin-guide/yaml/v7/nginx_api.yaml @@ -0,0 +1,4256 @@ +swagger: '2.0' +info: + version: '7.0' + title: NGINX Plus REST API + description: NGINX Plus REST + [API](https://nginx.org/en/docs/http/ngx_http_api_module.html) + provides access to NGINX Plus status information, + on-the-fly configuration of upstream servers and + key-value pairs management for + [http](https://nginx.org/en/docs/http/ngx_http_keyval_module.html) and + [stream](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html). 
+basePath: /api/7 +tags: + - name: General Info + - name: Processes + - name: Connections + - name: Slabs + - name: Resolvers + - name: SSL + - name: HTTP + - name: HTTP Requests + - name: HTTP Server Zones + - name: HTTP Location Zones + - name: HTTP Caches + - name: HTTP Limit Conns + - name: HTTP Limit Reqs + - name: HTTP Keyvals + - name: HTTP Upstreams + - name: Stream + - name: Stream Server Zones + - name: Stream Limit Conns + - name: Stream Keyvals + - name: Stream Upstreams + - name: Stream Zone Sync + - name: Method GET + - name: Method POST + - name: Method PATCH + - name: Method DELETE +schemes: + - http + - https +paths: + /: + get: + tags: + - General Info + - Method GET + summary: Return list of root endpoints + description: Returns a list of root endpoints. + operationId: getAPIEndpoints + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /nginx: + get: + tags: + - General Info + - Method GET + summary: Return status of nginx running instance + description: Returns nginx version, build name, address, + number of configuration reloads, IDs of master and worker processes. + operationId: getNginx + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of nginx running instance will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxObject' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /processes: + get: + tags: + - Processes + - Method GET + summary: Return nginx processes status + description: Returns the number of abnormally terminated + and respawned child processes. 
+ operationId: getProcesses + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxProcesses' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Processes + - Method DELETE + summary: Reset nginx processes statistics + description: Resets counters of abnormally terminated and respawned + child processes. + operationId: deleteProcesses + responses: + '204': + description: Success + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /connections: + get: + tags: + - Connections + - Method GET + summary: Return client connections statistics + description: Returns statistics of client connections. + operationId: getConnections + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxConnections' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the connections statistics will be output. + delete: + tags: + - Connections + - Method DELETE + summary: Reset client connections statistics + description: Resets statistics of accepted and dropped + client connections. + operationId: deleteConnections + responses: + '204': + description: Success + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /slabs/: + get: + tags: + - Slabs + - Method GET + summary: Return status of all slabs + description: Returns status of slabs + for each shared memory zone with slab allocator. 
+ operationId: getSlabs + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of slab zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZoneMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/slabs/{slabZoneName}': + parameters: + - name: slabZoneName + in: path + description: The name of the shared memory zone with slab allocator. + required: true + type: string + get: + tags: + - Slabs + - Method GET + summary: Return status of a slab + description: Returns status of slabs for a particular shared memory zone + with slab allocator. + operationId: getSlabZone + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the slab zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZone' + '404': + description: | + Slab not found (*SlabNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Slabs + - Method DELETE + summary: Reset slab statistics + description: Resets the “reqs” and “fails” + metrics for each memory slot. + operationId: deleteSlabZoneStats + responses: + '204': + description: Success + '404': + description: | + Slab not found (*SlabNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/: + get: + tags: + - HTTP + - Method GET + summary: Return list of HTTP-related endpoints + description: Returns a list of first level HTTP endpoints. 
+ operationId: getHttp + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /http/requests: + get: + tags: + - HTTP Requests + - Method GET + summary: Return HTTP requests statistics + description: Returns status of client HTTP requests. + operationId: getHttpRequests + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of client HTTP requests statistics + will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPRequests' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Requests + - Method DELETE + summary: Reset HTTP requests statistics + description: Resets the number of total client HTTP requests. + operationId: deleteHttpRequests + responses: + '204': + description: Success + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/server_zones/: + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of all HTTP server zones + description: Returns status information for each HTTP + [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone). + operationId: getHttpServerZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/server_zones/{httpServerZoneName}': + parameters: + - name: httpServerZoneName + in: path + description: The name of an HTTP server zone. + type: string + required: true + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of an HTTP server zone + description: Returns status of a particular HTTP server zone. + operationId: getHttpServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZone' + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Server Zones + - Method DELETE + summary: Reset statistics for an HTTP server zone + description: Resets statistics of accepted and discarded requests, responses, + received and sent bytes in a particular HTTP server zone. + operationId: deleteHttpServerZoneStat + responses: + '204': + description: Success + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/location_zones/: + get: + tags: + - HTTP Location Zones + - Method GET + summary: Return status of all HTTP location zones + description: Returns status information for each HTTP + [location zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone_location). 
+ operationId: getHttpLocationZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of location zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLocationZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/location_zones/{httpLocationZoneName}': + parameters: + - name: httpLocationZoneName + in: path + description: The name of an HTTP [location zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone_location). + type: string + required: true + get: + tags: + - HTTP Location Zones + - Method GET + summary: Return status of an HTTP location zone + description: Returns status of a particular + HTTP [location zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone_location). + operationId: getHttpLocationZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the location zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLocationZone' + '404': + description: | + Location zone not found (*LocationZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Location Zones + - Method DELETE + summary: Reset statistics for a location zone. + description: Resets statistics of accepted and discarded requests, responses, + received and sent bytes in a particular location zone. 
+ operationId: deleteHttpLocationZoneStat + responses: + '204': + description: Success + '404': + description: | + Location zone not found (*LocationZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/caches/: + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of all caches + description: Returns status of each cache configured by + [proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) + and other “*_cache_path” directives. + operationId: getHttpCaches + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of cache zones will be output. + If the “fields” value is empty, + then only names of cache zones will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCachesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/caches/{httpCacheZoneName}': + parameters: + - name: httpCacheZoneName + in: path + description: The name of the cache zone. + type: string + required: true + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of a cache + description: Returns status of a particular cache. + operationId: getHttpCacheZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the cache zone will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCache' + '404': + description: | + Cache not found (*CacheNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Caches + - Method DELETE + summary: Reset cache statistics + description: Resets statistics of cache hits/misses in a particular cache zone. + operationId: deleteHttpCacheZoneStat + responses: + '204': + description: Success + '404': + description: | + Cache not found (*CacheNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/limit_conns/: + get: + tags: + - HTTP Limit Conns + - Method GET + summary: Return status of all HTTP limit_conn zones + description: Returns status information for each HTTP + [limit_conn zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone). + operationId: getHttpLimitConnZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of limit_conn zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLimitConnZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/limit_conns/{httpLimitConnZoneName}': + parameters: + - name: httpLimitConnZoneName + in: path + description: The name of a + [limit_conn zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone). 
+ type: string + required: true + get: + tags: + - HTTP Limit Conns + - Method GET + summary: Return status of an HTTP limit_conn zone + description: Returns status of a particular HTTP + [limit_conn zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone). + operationId: getHttpLimitConnZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the + [limit_conn zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone) + will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLimitConnZone' + '404': + description: | + limit_conn not found (*LimitConnNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Limit Conns + - Method DELETE + summary: Reset statistics for an HTTP limit_conn zone + description: Resets the connection limiting statistics. + operationId: deleteHttpLimitConnZoneStat + responses: + '204': + description: Success + '404': + description: | + limit_conn not found (*LimitConnNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/limit_reqs/: + get: + tags: + - HTTP Limit Reqs + - Method GET + summary: Return status of all HTTP limit_req zones + description: Returns status information for each HTTP + [limit_req zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone). + operationId: getHttpLimitReqZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of limit_req zones will be output. + If the “fields” value is empty, + then only zone names will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLimitReqZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/limit_reqs/{httpLimitReqZoneName}': + parameters: + - name: httpLimitReqZoneName + in: path + description: The name of a + [limit_req zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone). + type: string + required: true + get: + tags: + - HTTP Limit Reqs + - Method GET + summary: Return status of an HTTP limit_req zone + description: Returns status of a particular HTTP + [limit_req zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone). + operationId: getHttpLimitReqZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the + [limit_req zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone) + will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLimitReqZone' + '404': + description: | + limit_req not found (*LimitReqNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Limit Reqs + - Method DELETE + summary: Reset statistics for an HTTP limit_req zone + description: Resets the requests limiting statistics. + operationId: deleteHttpLimitReqZoneStat + responses: + '204': + description: Success + '404': + description: | + limit_req not found (*LimitReqNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/upstreams/: + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of all HTTP upstream server groups + description: Returns status of each HTTP upstream server group + and its servers. 
+ operationId: getHttpUpstreams + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of upstream server groups will be output. + If the “fields” value is empty, + only names of upstreams will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an HTTP upstream server group. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of an HTTP upstream server group + description: Returns status of a particular HTTP upstream server group + and its servers. + operationId: getHttpUpstreamName + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Upstreams + - Method DELETE + summary: Reset statistics of an HTTP upstream server group + description: Resets the statistics for each upstream server + in an upstream server group and queue statistics. 
+ operationId: deleteHttpUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/servers/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an upstream server group. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of all servers in an HTTP upstream server group + description: Returns configuration of each server + in a particular HTTP upstream server group. + operationId: getHttpUpstreamServers + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServerMap' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - HTTP Upstreams + - Method POST + summary: Add a server to an HTTP upstream server group + description: Adds a new server to an HTTP upstream server group. + Server parameters are specified in the JSON format. + operationId: postHttpUpstreamServer + produces: + - application/json + parameters: + - in: body + name: postHttpUpstreamServer + description: Address of a new server and other optional parameters + in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxHTTPUpstreamConfServer'
+ responses:
+ '201':
+ description: Created
+ schema:
+ $ref: '#/definitions/NginxHTTPUpstreamConfServer'
+ '400':
+ description: |
+ Upstream is static (*UpstreamStatic*),
+ invalid “**parameter**” value (*UpstreamConfFormatError*),
+ missing “*server*” argument (*UpstreamConfFormatError*),
+ unknown parameter “**name**” (*UpstreamConfFormatError*),
+ nested object or list (*UpstreamConfFormatError*),
+ “*error*” while parsing (*UpstreamBadAddress*),
+ service upstream “*host*” may not have port (*UpstreamBadAddress*),
+ service upstream “*host*” requires domain name (*UpstreamBadAddress*),
+ invalid “*weight*” (*UpstreamBadWeight*),
+ invalid “*max_conns*” (*UpstreamBadMaxConns*),
+ invalid “*max_fails*” (*UpstreamBadMaxFails*),
+ invalid “*fail_timeout*” (*UpstreamBadFailTimeout*),
+ invalid “*slow_start*” (*UpstreamBadSlowStart*),
+ reading request body failed (*BodyReadError*),
+ route is too long (*UpstreamBadRoute*),
+ “*service*” is empty (*UpstreamBadService*),
+ no resolver defined to resolve (*UpstreamConfNoResolver*),
+ upstream “**name**” has no backup (*UpstreamNoBackup*),
+ upstream “**name**” memory exhausted (*UpstreamOutOfMemory*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Unknown version (*UnknownVersion*),
+ upstream not found (*UpstreamNotFound*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '409':
+ description: Entry exists (*EntryExists*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '/http/upstreams/{httpUpstreamName}/servers/{httpUpstreamServerId}':
+ parameters:
+ - name: httpUpstreamName
+ in: path
+ description: The name of the upstream server group.
+ required: true + type: string + - name: httpUpstreamServerId + in: path + description: The ID of the server. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of a server in an HTTP upstream server group + description: Returns configuration of a particular server + in the HTTP upstream server group. + operationId: getHttpUpstreamPeer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Server with ID “**id**” does not exist (*UpstreamServerNotFound*), + unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - HTTP Upstreams + - Method PATCH + summary: Modify a server in an HTTP upstream server group + description: Modifies settings of a particular server + in an HTTP upstream server group. + Server parameters are specified in the JSON format. + operationId: patchHttpUpstreamPeer + produces: + - application/json + parameters: + - in: body + name: patchHttpUpstreamServer + description: Server parameters, specified in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxHTTPUpstreamConfServer'
+ responses:
+ '200':
+ description: Success
+ schema:
+ $ref: '#/definitions/NginxHTTPUpstreamConfServer'
+ '400':
+ description: |
+ Upstream is static (*UpstreamStatic*),
+ invalid “**parameter**” value (*UpstreamConfFormatError*),
+ unknown parameter “**name**” (*UpstreamConfFormatError*),
+ nested object or list (*UpstreamConfFormatError*),
+ “*error*” while parsing (*UpstreamBadAddress*),
+ invalid “*server*” argument (*UpstreamBadAddress*),
+ invalid server ID (*UpstreamBadServerId*),
+ invalid “*weight*” (*UpstreamBadWeight*),
+ invalid “*max_conns*” (*UpstreamBadMaxConns*),
+ invalid “*max_fails*” (*UpstreamBadMaxFails*),
+ invalid “*fail_timeout*” (*UpstreamBadFailTimeout*),
+ invalid “*slow_start*” (*UpstreamBadSlowStart*),
+ reading request body failed (*BodyReadError*),
+ route is too long (*UpstreamBadRoute*),
+ “*service*” is empty (*UpstreamBadService*),
+ server “**ID**” address is immutable (*UpstreamServerImmutable*),
+ server “**ID**” weight is immutable (*UpstreamServerWeightImmutable*),
+ upstream “*name*” memory exhausted (*UpstreamOutOfMemory*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Server with ID “**id**” does not exist (*UpstreamServerNotFound*),
+ unknown version (*UnknownVersion*),
+ upstream not found (*UpstreamNotFound*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - HTTP Upstreams
+ - Method DELETE
+ summary: Remove a server from an HTTP upstream server group
+ description: Removes a server from an HTTP upstream server group.
+ operationId: deleteHttpUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Server with ID “**id**” does not exist (*UpstreamServerNotFound*), + unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/keyvals/: + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from all HTTP keyval zones + description: Returns key-value pairs for each HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + operationId: getHttpKeyvalZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: If the “fields” value is empty, + then only HTTP keyval zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/keyvals/{httpKeyvalZoneName}': + parameters: + - name: httpKeyvalZoneName + in: path + description: The name of an HTTP keyval shared memory zone. + required: true + type: string + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from an HTTP keyval zone + description: Returns key-value pairs stored in a particular HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). 
+ operationId: getHttpKeyvalZoneKeysValues + produces: + - application/json + parameters: + - name: key + in: query + type: string + description: Get a particular key-value pair from the HTTP keyval zone. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZone' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - HTTP Keyvals + - Method POST + summary: Add a key-value pair to the HTTP keyval zone + description: Adds a new key-value pair to the HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + Several key-value pairs can be entered + if the HTTP keyval shared memory zone is empty. + operationId: postHttpKeyvalZoneData + produces: + - application/json + parameters: + - in: body + name: Key-value + description: A key-value pair is specified in the JSON format. + Several key-value pairs can be entered + if the HTTP keyval shared memory zone is empty. + Expiration time in milliseconds can be specified for a key-value pair + with the *expire* parameter + which overrides the [*timeout*](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_timeout) + parameter of the + keyval_zone + directive. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxHTTPKeyvalZonePostPatch'
+ responses:
+ '201':
+ description: Created
+ '400':
+ description: |
+ Invalid JSON (*KeyvalFormatError*),
+ invalid key format (*KeyvalFormatError*),
+ key required (*KeyvalFormatError*),
+ keyval timeout is not enabled (*KeyvalFormatError*),
+ only one key can be added (*KeyvalFormatError*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '409':
+ description: |
+ Entry exists (*EntryExists*),
+ key already exists (*KeyvalKeyExists*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '413':
+ description: Request Entity Too Large
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ patch:
+ tags:
+ - HTTP Keyvals
+ - Method PATCH
+ summary: Modify a key-value or delete a key
+ description: Changes the value of the selected key in the key-value pair,
+ deletes a key by setting the key value to null,
+ changes expiration time of a key-value pair.
+ If
+ synchronization
+ of keyval zones in a cluster is enabled,
+ deletes a key only on a target cluster node.
+ Expiration time in milliseconds can be specified for a key-value pair
+ with the *expire* parameter
+ which overrides the [*timeout*](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_timeout)
+ parameter of the
+ keyval_zone
+ directive.
+ operationId: patchHttpKeyvalZoneKeyValue
+ produces:
+ - application/json
+ parameters:
+ - in: body
+ name: httpKeyvalZoneKeyValue
+ description: A new value for the key is specified in the JSON format.
+ required: true
+ schema:
+ $ref: '#/definitions/NginxHTTPKeyvalZonePostPatch'
+ responses:
+ '204':
+ description: Success
+ '400':
+ description: |
+ Invalid JSON (*KeyvalFormatError*),
+ key required (*KeyvalFormatError*),
+ keyval timeout is not enabled (*KeyvalFormatError*),
+ only one key can be updated (*KeyvalFormatError*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ keyval key not found (*KeyvalKeyNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '413':
+ description: Request Entity Too Large
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - HTTP Keyvals
+ - Method DELETE
+ summary: Empty the HTTP keyval zone
+ description: Deletes all key-value pairs from the HTTP keyval shared memory
+ [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone).
+ If
+ synchronization
+ of keyval zones in a cluster is enabled,
+ empties the keyval zone only on a target cluster node.
+ operationId: deleteHttpKeyvalZoneData
+ responses:
+ '204':
+ description: Success
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ /stream/:
+ get:
+ tags:
+ - Stream
+ summary: Return list of stream-related endpoints
+ description: Returns a list of first level stream endpoints.
+ operationId: getStream + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /stream/server_zones/: + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of all stream server zones + description: Returns status information for each stream + [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone). + operationId: getStreamServerZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/stream/server_zones/{streamServerZoneName}': + parameters: + - name: streamServerZoneName + in: path + description: The name of a stream server zone. + type: string + required: true + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of a stream server zone + description: Returns status of a particular stream server zone. + operationId: getStreamServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZone' + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Server Zones + - Method DELETE + summary: Reset statistics for a stream server zone + description: Resets statistics of accepted and discarded connections, sessions, + received and sent bytes in a particular stream server zone. + operationId: deleteStreamServerZoneStat + responses: + '204': + description: Success + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/limit_conns/: + get: + tags: + - Stream Limit Conns + - Method GET + summary: Return status of all stream limit_conn zones + description: Returns status information for each stream + [limit_conn zone](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone). + operationId: getStreamLimitConnZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of limit_conn zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamLimitConnZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/stream/limit_conns/{streamLimitConnZoneName}': + parameters: + - name: streamLimitConnZoneName + in: path + description: The name of a + [limit_conn zone](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone). 
+ type: string
+ required: true
+ get:
+ tags:
+ - Stream Limit Conns
+ - Method GET
+ summary: Return status of a stream limit_conn zone
+ description: Returns status of a particular stream
+ [limit_conn zone](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone).
+ operationId: getStreamLimitConnZone
+ produces:
+ - application/json
+ parameters:
+ - name: fields
+ in: query
+ type: string
+ description: Limits which fields of the
+ [limit_conn zone](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone)
+ will be output.
+ responses:
+ '200':
+ description: Success
+ schema:
+ $ref: '#/definitions/NginxStreamLimitConnZone'
+ '404':
+ description: |
+ limit_conn not found (*LimitConnNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - Stream Limit Conns
+ - Method DELETE
+ summary: Reset statistics for a stream limit_conn zone
+ description: Resets the connection limiting statistics.
+ operationId: deleteStreamLimitConnZoneStat
+ responses:
+ '204':
+ description: Success
+ '404':
+ description: |
+ limit_conn not found (*LimitConnNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ /stream/upstreams/:
+ get:
+ tags:
+ - Stream Upstreams
+ - Method GET
+ summary: Return status of all stream upstream server groups
+ description: Returns status of each stream upstream server group
+ and its servers.
+ operationId: getStreamUpstreams
+ produces:
+ - application/json
+ parameters:
+ - name: fields
+ in: query
+ type: string
+ description: Limits which fields of upstream server groups will be output.
+ If the “fields” value is empty,
+ only names of upstreams will be output.
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/': + parameters: + - name: streamUpstreamName + in: path + description: The name of a stream upstream server group. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return status of a stream upstream server group + description: Returns status of a particular stream upstream server group + and its servers. + operationId: getStreamUpstream + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Upstreams + - Method DELETE + summary: Reset statistics of a stream upstream server group + description: Resets the statistics for each upstream server + in an upstream server group. 
+ operationId: deleteStreamUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/servers/': + parameters: + - name: streamUpstreamName + in: path + description: The name of an upstream server group. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of all servers in a stream upstream server group + description: Returns configuration of each server + in a particular stream upstream server group. + operationId: getStreamUpstreamServers + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - Stream Upstreams + - Method POST + summary: Add a server to a stream upstream server group + description: Adds a new server to a stream upstream server group. + Server parameters are specified in the JSON format. + operationId: postStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: postStreamUpstreamServer + description: Address of a new server and other optional parameters + in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxStreamUpstreamConfServer'
+ responses:
+ '201':
+ description: Created
+ schema:
+ $ref: '#/definitions/NginxStreamUpstreamConfServer'
+ '400':
+ description: |
+ Upstream is static (*UpstreamStatic*),
+ invalid “**parameter**” value (*UpstreamConfFormatError*),
+ missing “*server*” argument (*UpstreamConfFormatError*),
+ unknown parameter “**name**” (*UpstreamConfFormatError*),
+ nested object or list (*UpstreamConfFormatError*),
+ “*error*” while parsing (*UpstreamBadAddress*),
+ no port in server “*host*” (*UpstreamBadAddress*),
+ service upstream “*host*” may not have port (*UpstreamBadAddress*),
+ service upstream “*host*” requires domain name (*UpstreamBadAddress*),
+ invalid “*weight*” (*UpstreamBadWeight*),
+ invalid “*max_conns*” (*UpstreamBadMaxConns*),
+ invalid “*max_fails*” (*UpstreamBadMaxFails*),
+ invalid “*fail_timeout*” (*UpstreamBadFailTimeout*),
+ invalid “*slow_start*” (*UpstreamBadSlowStart*),
+ “*service*” is empty (*UpstreamBadService*),
+ no resolver defined to resolve (*UpstreamConfNoResolver*),
+ upstream “**name**” has no backup (*UpstreamNoBackup*),
+ upstream “**name**” memory exhausted (*UpstreamOutOfMemory*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Unknown version (*UnknownVersion*),
+ upstream not found (*UpstreamNotFound*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '409':
+ description: Entry exists (*EntryExists*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '/stream/upstreams/{streamUpstreamName}/servers/{streamUpstreamServerId}':
+ parameters:
+ - name: streamUpstreamName
+ in: path
+ description: The name of the upstream server group.
+ required: true + type: string + - name: streamUpstreamServerId + in: path + description: The ID of the server. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of a server in a stream upstream server group + description: Returns configuration of a particular server + in the stream upstream server group. + operationId: getStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - Stream Upstreams + - Method PATCH + summary: Modify a server in a stream upstream server group + description: Modifies settings of a particular server + in a stream upstream server group. + Server parameters are specified in the JSON format. + operationId: patchStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: patchStreamUpstreamServer + description: Server parameters, specified in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxStreamUpstreamConfServer'
+ responses:
+ '200':
+ description: Success
+ schema:
+ $ref: '#/definitions/NginxStreamUpstreamConfServer'
+ '400':
+ description: |
+ Upstream is static (*UpstreamStatic*),
+ invalid “**parameter**” value (*UpstreamConfFormatError*),
+ unknown parameter “**name**” (*UpstreamConfFormatError*),
+ nested object or list (*UpstreamConfFormatError*),
+ “*error*” while parsing (*UpstreamBadAddress*),
+ invalid “*server*” argument (*UpstreamBadAddress*),
+ no port in server “*host*” (*UpstreamBadAddress*),
+ invalid server ID (*UpstreamBadServerId*),
+ invalid “*weight*” (*UpstreamBadWeight*),
+ invalid “*max_conns*” (*UpstreamBadMaxConns*),
+ invalid “*max_fails*” (*UpstreamBadMaxFails*),
+ invalid “*fail_timeout*” (*UpstreamBadFailTimeout*),
+ invalid “*slow_start*” (*UpstreamBadSlowStart*),
+ reading request body failed (*BodyReadError*),
+ “*service*” is empty (*UpstreamBadService*),
+ server “**ID**” address is immutable (*UpstreamServerImmutable*),
+ server “**ID**” weight is immutable (*UpstreamServerWeightImmutable*),
+ upstream “*name*” memory exhausted (*UpstreamOutOfMemory*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Server with ID “**id**” does not exist (*UpstreamServerNotFound*),
+ unknown version (*UnknownVersion*),
+ upstream not found (*UpstreamNotFound*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - Stream Upstreams
+ - Method DELETE
+ summary: Remove a server from a stream upstream server group
+ description: Removes a server from a stream server group.
+ operationId: deleteStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Server with ID “**id**” does not exist (*UpstreamServerNotFound*), + unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/keyvals/: + get: + tags: + - Stream Keyvals + - Method GET + summary: Return key-value pairs from all stream keyval zones + description: Returns key-value pairs for each stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + operationId: getStreamKeyvalZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: If the “fields” value is empty, + then only stream keyval zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamKeyvalZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/stream/keyvals/{streamKeyvalZoneName}': + parameters: + - name: streamKeyvalZoneName + in: path + description: The name of a stream keyval shared memory zone. + required: true + type: string + get: + tags: + - Stream Keyvals + - Method GET + summary: Return key-value pairs from a stream keyval zone + description: Returns key-value pairs stored in a particular stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). 
+ operationId: getStreamKeyvalZoneKeysValues + produces: + - application/json + parameters: + - name: key + in: query + type: string + description: Get a particular key-value pair from the stream keyval zone. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamKeyvalZone' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - Stream Keyvals + - Method POST + summary: Add a key-value pair to the stream keyval zone + description: Adds a new key-value pair to the stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + Several key-value pairs can be entered + if the stream keyval shared memory zone is empty. + operationId: postStreamKeyvalZoneData + produces: + - application/json + parameters: + - in: body + name: Key-value + description: A key-value pair is specified in the JSON format. + Several key-value pairs can be entered + if the stream keyval shared memory zone is empty. + Expiration time in milliseconds can be specified for a key-value pair + with the *expire* parameter + which overrides the [*timeout*](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_timeout) + parameter of the + keyval_zone + directive. 
+ required: true + schema: + $ref: '#/definitions/NginxStreamKeyvalZonePostPatch' + responses: + '201': + description: Created + '400': + description: | + Invalid JSON (*KeyvalFormatError*), + invalid key format (*KeyvalFormatError*), + key required (*KeyvalFormatError*), + keyval timeout is not enabled (*KeyvalFormatError*), + only one key can be added (*KeyvalFormatError*), + reading request body failed (*BodyReadError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '409': + description: | + Entry exists (*EntryExists*), + key already exists (*KeyvalKeyExists*) + schema: + $ref: '#/definitions/NginxError' + '413': + description: Request Entity Too Large + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - Stream Keyvals + - Method PATCH + summary: Modify a key-value or delete a key + description: Changes the value of the selected key in the key-value pair, + deletes a key by setting the key value to null, + changes expiration time of a key-value pair. + If + synchronization + of keyval zones in a cluster is enabled, + deletes a key only on a target cluster node. + Expiration time is specified in milliseconds + with the *expire* parameter + which overrides the [*timeout*](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_timeout) + parameter of the + keyval_zone + directive. + operationId: patchStreamKeyvalZoneKeyValue + produces: + - application/json + parameters: + - in: body + name: streamKeyvalZoneKeyValue + description: A new value for the key is specified in the JSON format.
+ required: true + schema: + $ref: '#/definitions/NginxStreamKeyvalZonePostPatch' + responses: + '204': + description: Success + '400': + description: | + Invalid JSON (*KeyvalFormatError*), + key required (*KeyvalFormatError*), + keyval timeout is not enabled (*KeyvalFormatError*), + only one key can be updated (*KeyvalFormatError*), + reading request body failed (*BodyReadError*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '413': + description: Request Entity Too Large + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Keyvals + - Method DELETE + summary: Empty the stream keyval zone + description: Deletes all key-value pairs from the stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + If + synchronization + of keyval zones in a cluster is enabled, + empties the keyval zone only on a target cluster node. + operationId: deleteStreamKeyvalZoneData + responses: + '204': + description: Success + '404': + description: | + Keyval not found (*KeyvalNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/zone_sync/: + get: + tags: + - Stream Zone Sync + - Method GET + summary: Return sync status of a node + description: Returns synchronization status of a cluster node.
+ operationId: getStreamZoneSync + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamZoneSync' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /resolvers/: + get: + tags: + - Resolvers + - Method GET + summary: Return status for all resolver zones + description: Returns status information for each + [resolver zone](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver_status_zone). + operationId: getResolverZones + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxResolverZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of resolvers statistics will be output. + '/resolvers/{resolverZoneName}': + parameters: + - name: resolverZoneName + in: path + description: The name of a resolver zone. + required: true + type: string + get: + tags: + - Resolvers + - Method GET + summary: Return statistics of a resolver zone + description: Returns statistics stored in a particular resolver + [zone](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver_status_zone). + operationId: getResolverZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the resolver zone will be output + (requests, responses, or both). + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxResolverZone' + '404': + description: | + Resolver zone not found (*ResolverZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Resolvers + - Method DELETE + summary: Reset statistics for a resolver zone. + description: Resets statistics in a particular resolver zone. 
+ operationId: deleteResolverZoneStat + responses: + '204': + description: Success + '404': + description: | + Resolver zone not found (*ResolverZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /ssl: + get: + tags: + - SSL + - Method GET + summary: Return SSL statistics + description: Returns SSL statistics. + operationId: getSsl + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSSLObject' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of SSL statistics will be output. + delete: + tags: + - SSL + - Method DELETE + summary: Reset SSL statistics + description: Resets counters of SSL handshakes and session reuses. + operationId: deleteSslStat + responses: + '204': + description: Success + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' +### +###DEFINITIONS +### +definitions: + ArrayOfStrings: + title: Array + description: | + An array of strings. + type: array + items: + type: string + NginxObject: + title: nginx + description: | + General information about nginx: + type: object + properties: + version: + type: string + description: Version of nginx. + build: + type: string + description: Name of nginx build. + address: + type: string + description: The address of the server that accepted status request. + generation: + type: integer + description: The total number of configuration + reloads. 
+ load_timestamp: + type: string + format: date-time + description: Time of the last reload of configuration, + in the ISO 8601 format with millisecond resolution. + timestamp: + type: string + format: date-time + description: Current time + in the ISO 8601 format with millisecond resolution. + pid: + type: integer + description: The ID of the worker process that handled status request. + ppid: + type: integer + description: The ID of the master process that started the + worker process. + example: + nginx: + version: 1.17.3 + build: nginx-plus-r19 + address: 206.251.255.64 + generation: 6 + load_timestamp: 2019-10-01T11:15:44.467Z + timestamp: 2019-10-01T09:26:07.305Z + pid: 32212 + ppid: 32210 + NginxProcesses: + title: Processes + type: object + properties: + respawned: + type: integer + description: The total number of abnormally terminated + and respawned child processes. + example: + respawned: 0 + NginxConnections: + title: Connections + description: | + The number of accepted, dropped, active, and idle connections. + type: object + properties: + accepted: + type: integer + description: The total number of accepted client connections. + dropped: + type: integer + description: The total number of dropped client connections. + active: + type: integer + description: The current number of active client connections. + idle: + type: integer + description: The current number of idle client connections. + example: + accepted: 4968119 + dropped: 0 + active: 5 + idle: 117 + NginxSSLObject: + title: SSL + type: object + properties: + handshakes: + type: integer + description: The total number of successful SSL handshakes. + handshakes_failed: + type: integer + description: The total number of failed SSL handshakes. + session_reuses: + type: integer + description: The total number of session reuses during SSL handshake. 
+ example: + handshakes: 79572 + handshakes_failed: 21025 + session_reuses: 15762 + NginxSlabZoneMap: + title: Slab Zones + description: | + Status zones that use slab allocator. + type: object + additionalProperties: + $ref: '#/definitions/NginxSlabZone' + example: + http_cache: + pages: + used: 2 + free: 2452 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 1 + free: 126 + reqs: 1 + fails: 0 + 64: + used: 2 + free: 62 + reqs: 2 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + dns-backends: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZone: + title: Shared memory zone with slab allocator + description: | + type: object + properties: + pages: + type: object + description: The number of free and used memory pages. + properties: + used: + type: integer + description: The current number of used memory pages. + free: + type: integer + description: The current number of free memory pages. + slots: + type: object + title: Memory Slots + description: Status data for memory slots (8, 16, 32, 64, 128, etc.) + additionalProperties: + $ref: '#/definitions/NginxSlabZoneSlot' + example: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZoneSlot: + title: Memory Slot + type: object + properties: + used: + type: integer + description: The current number of used memory slots. 
+ free: + type: integer + description: The current number of free memory slots. + reqs: + type: integer + description: The total number of attempts + to allocate memory of specified size. + fails: + type: integer + description: The number of unsuccessful attempts + to allocate memory of specified size. + NginxHTTPRequests: + title: HTTP Requests + type: object + properties: + total: + type: integer + description: The total number of client requests. + current: + type: integer + description: The current number of client requests. + example: + total: 10624511 + current: 4 + NginxHTTPServerZonesMap: + title: HTTP Server Zones + description: | + Status data for all HTTP + status zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPServerZone' + example: + site1: + processing: 2 + requests: 736395 + responses: + 1xx: 0 + 2xx: 727290 + 3xx: 4614 + 4xx: 934 + 5xx: 1535 + codes: + 200: 727270 + 301: 4614 + 404: 930 + 503: 1535 + total: 734373 + discarded: 2020 + received: 180157219 + sent: 20183175459 + site2: + processing: 1 + requests: 185307 + responses: + 1xx: 0 + 2xx: 112674 + 3xx: 45383 + 4xx: 2504 + 5xx: 4419 + codes: + 200: 112674 + 301: 45383 + 404: 2504 + 503: 4419 + total: 164980 + discarded: 20326 + received: 51575327 + sent: 2983241510 + NginxHTTPServerZone: + title: HTTP Server Zone + type: object + properties: + processing: + type: integer + description: The number of client requests + that are currently being processed. + requests: + type: integer + description: The total number of client requests received from clients. + responses: + description: The total number of responses sent to clients, the + number of responses with status codes + “1xx”, “2xx”, “3xx”, + “4xx”, and “5xx”, and + the number of responses per each status code. + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. 
+ readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. + readOnly: true + codes: + type: object + description: The number of responses per each status code. + readOnly: true + properties: + codeNumber: + type: integer + description: The number of responses with this particular status code. + readOnly: true + total: + type: integer + description: The total number of responses sent to clients. + readOnly: true + discarded: + type: integer + description: The total number of + requests completed without sending a response. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + processing: 1 + requests: 706690 + responses: + 1xx: 0 + 2xx: 699482 + 3xx: 4522 + 4xx: 907 + 5xx: 266 + codes: + 200: 699482 + 301: 4522 + 404: 907 + 503: 266 + total: 705177 + discarded: 1513 + received: 172711587 + sent: 19415530115 + NginxHTTPLocationZonesMap: + title: HTTP Location Zones + description: | + Status data for all HTTP + location zones. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPLocationZone' + example: + site1: + requests: 736395 + responses: + 1xx: 0 + 2xx: 727290 + 3xx: 4614 + 4xx: 934 + 5xx: 1535 + codes: + 200: 727290 + 301: 4614 + 404: 934 + 503: 1535 + total: 734373 + discarded: 2020 + received: 180157219 + sent: 20183175459 + site2: + requests: 185307 + responses: + 1xx: 0 + 2xx: 112674 + 3xx: 45383 + 4xx: 2504 + 5xx: 4419 + codes: + 200: 112674 + 301: 45383 + 404: 2504 + 503: 4419 + total: 164980 + discarded: 20326 + received: 51575327 + sent: 2983241510 + NginxHTTPLocationZone: + title: HTTP Location Zone + type: object + properties: + requests: + type: integer + description: The total number of client requests received from clients. + responses: + description: The total number of responses sent to clients, the + number of responses with status codes + “1xx”, “2xx”, “3xx”, + “4xx”, and “5xx”, and + the number of responses per each status code. + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. + readOnly: true + codes: + type: object + description: The number of responses per each status code. + readOnly: true + properties: + codeNumber: + type: integer + description: The number of responses with this particular status code. + readOnly: true + total: + type: integer + description: The total number of responses sent to clients. + readOnly: true + discarded: + type: integer + description: The total number of + requests completed without sending a response. 
+ received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + requests: 706690 + responses: + 1xx: 0 + 2xx: 699482 + 3xx: 4522 + 4xx: 907 + 5xx: 266 + codes: + 200: 112674 + 301: 4522 + 404: 2504 + 503: 266 + total: 705177 + discarded: 1513 + received: 172711587 + sent: 19415530115 + NginxHTTPCachesMap: + title: HTTP Caches + description: | + Status information of all HTTP caches configured by + proxy_cache_path + and other “*_cache_path” directives. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPCache' + example: + http-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + frontend-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + NginxHTTPCache: + title: HTTP Cache + type: object + properties: + size: + type: integer + description: The current size of the cache. + max_size: + type: integer + description: The limit on the maximum size of the cache + specified in the configuration. 
+ cold: + type: boolean + description: A boolean value indicating whether the “cache loader” process + is still loading data from disk into the cache. + hit: + type: object + properties: + responses: + type: integer + description: The total number of + valid + responses read from the cache. + bytes: + type: integer + description: The total number of bytes read from the cache. + stale: + type: object + properties: + responses: + type: integer + description: The total number of expired responses read from the cache (see + proxy_cache_use_stale + and other “*_cache_use_stale” directives). + bytes: + type: integer + description: The total number of bytes read from the cache. + updating: + type: object + properties: + responses: + type: integer + description: The total number of expired responses read from the cache + while responses were being updated (see + proxy_cache_use_stale + and other “*_cache_use_stale” directives). + bytes: + type: integer + description: The total number of bytes read from the cache. + revalidated: + type: object + properties: + responses: + type: integer + description: The total number of expired and revalidated responses + read from the cache (see + proxy_cache_revalidate + and other “*_cache_revalidate” directives). + bytes: + type: integer + description: The total number of bytes read from the cache. + miss: + type: object + properties: + responses: + type: integer + description: The total number of responses not found in the cache. + bytes: + type: integer + description: The total number of bytes read from the proxied server. + responses_written: + type: integer + description: The total number of responses written to the cache. + bytes_written: + type: integer + description: The total number of bytes written to the cache. + expired: + type: object + properties: + responses: + type: integer + description: The total number of expired responses not taken from the cache.
+ bytes: + type: integer + description: The total number of bytes read from the proxied server. + responses_written: + type: integer + description: The total number of responses written to the cache. + bytes_written: + type: integer + description: The total number of bytes written to the cache. + bypass: + type: object + properties: + responses: + type: integer + description: The total number of responses not looked up in the cache due to the + proxy_cache_bypass + and other “*_cache_bypass” directives. + bytes: + type: integer + description: The total number of bytes read from the proxied server. + responses_written: + type: integer + description: The total number of responses written to the cache. + bytes_written: + type: integer + description: The total number of bytes written to the cache. + example: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + NginxHTTPLimitConnZonesMap: + title: HTTP limit_conns + description: | + Status data for all HTTP + limit_conn zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPLimitConnZone' + NginxHTTPLimitConnZone: + title: HTTP Connections Limiting + type: object + properties: + passed: + type: integer + description: The total number of connections that were neither limited + nor accounted as limited. + rejected: + type: integer + description: The total number of connections that were rejected. + rejected_dry_run: + type: integer + description: The total number of connections accounted as rejected in the + dry run + mode. 
+ example: + passed: 15 + rejected: 0 + rejected_dry_run: 2 + NginxHTTPLimitReqZonesMap: + title: HTTP limit_reqs + description: | + Status data for all HTTP + limit_req zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPLimitReqZone' + NginxHTTPLimitReqZone: + title: HTTP Requests Rate Limiting + type: object + properties: + passed: + type: integer + description: The total number of requests that were neither limited + nor accounted as limited. + delayed: + type: integer + description: The total number of requests that were delayed. + rejected: + type: integer + description: The total number of requests that were rejected. + delayed_dry_run: + type: integer + description: The total number of requests accounted as delayed in the + dry run + mode. + rejected_dry_run: + type: integer + description: The total number of requests accounted as rejected in the + dry run + mode. + example: + passed: 15 + delayed: 4 + rejected: 0 + delayed_dry_run: 1 + rejected_dry_run: 2 + NginxHTTPUpstreamMap: + title: HTTP Upstreams + description: | + Status information of all HTTP + dynamically configurable + groups. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPUpstream' + example: + trac-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + codes: + 200: 666310 + 404: 915 + 503: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + codes: {} + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + keepalive: 0 + zombies: 0 + zone: trac-backend + hg-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + codes: + 200: 666310 + 404: 915 + 503: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + codes: {} + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: 
false + downtime: 262925617 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + keepalive: 0 + zombies: 0 + zone: hg-backend + NginxHTTPUpstream: + title: HTTP Upstream + type: object + properties: + peers: + $ref: '#/definitions/NginxHTTPUpstreamPeerMap' + keepalive: + type: integer + description: The current number of idle + keepalive + connections. + zombies: + type: integer + description: The current number of servers removed + from the group but still processing active client requests. + zone: + type: string + description: The name of the shared memory + zone + that keeps the group’s configuration and run-time state. + queue: + type: object + description: > + For the requests + queue, + the following data are provided: + properties: + size: + type: integer + description: The current number of requests in the queue. + max_size: + type: integer + description: The maximum number of requests that can be in the queue + at the same time. + overflows: + type: integer + description: The total number of requests rejected due to the queue overflow. 
+ example: + upstream_backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 20 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + codes: + 200: 666310 + 404: 915 + 503: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 20 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + codes: {} + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + keepalive: 0 + zombies: 0 + zone: upstream_backend + NginxHTTPUpstreamPeerMap: + title: HTTP Upstream Servers + description: | + An array of HTTP + upstream servers. + type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamPeer' + NginxHTTPUpstreamPeer: + title: HTTP Upstream Server + type: object + properties: + id: + type: integer + description: The ID of the server. + readOnly: true + server: + type: string + description: An address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. + name: + type: string + description: The name of the server specified in the + server + directive. + readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. 
+ state: + type: string + enum: + - up + - draining + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “draining”, “down”, + “unavail”, “checking”, + and “unhealthy”. + active: + type: integer + description: The current number of active connections. + readOnly: true + max_conns: + type: integer + description: The + max_conns + limit for the server. + requests: + type: integer + description: The total number of client requests forwarded to this server. + readOnly: true + responses: + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. + readOnly: true + codes: + type: object + description: The number of responses per each status code. + readOnly: true + properties: + codeNumber: + type: integer + description: The number of responses with this particular status code. + readOnly: true + total: + type: integer + description: The total number of responses obtained from this server. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. 
+ readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client requests + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. + readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. + fails: + type: integer + description: The number of failed health checks. + unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). + last_passed: + type: boolean + description: Boolean indicating if the last health check request was successful + and passed + tests. + downtime: + type: integer + readOnly: true + description: Total time the server was in the “unavail”, + “checking”, and “unhealthy” states. + downstart: + type: string + format: date-time + readOnly: true + description: The time when the server became + “unavail”, “checking”, + or “unhealthy”, + in the ISO 8601 format with millisecond resolution. + selected: + type: string + format: date-time + readOnly: true + description: The time when the server was last selected to process a request, + in the ISO 8601 format with millisecond resolution. + header_time: + type: integer + readOnly: true + description: The average time to get the + response header + from the server. + response_time: + type: integer + readOnly: true + description: The average time to get the + full response + from the server. + NginxHTTPUpstreamConfServerMap: + title: HTTP Upstream Servers + description: An array of HTTP upstream servers for dynamic configuration. 
+ type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:8088 + weight: 1 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: false + down: false + - id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPUpstreamConfServer: + title: HTTP Upstream Server + description: | + Dynamically configurable parameters of an HTTP upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the HTTP upstream server. + The ID is assigned automatically and cannot be changed. + readOnly: true + server: + type: string + description: Same as the + address + parameter of the HTTP upstream server. + When adding a server, it is possible to specify it as a domain name. + In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “http” block. + See also the + resolve + parameter of the HTTP upstream server. + service: + type: string + description: Same as the + service + parameter of the HTTP upstream server. + This parameter cannot be changed. + readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the HTTP upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the HTTP upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the HTTP upstream server. + fail_timeout: + type: string + description: Same as the + fail_timeout + parameter of the HTTP upstream server. + slow_start: + type: string + description: Same as the + slow_start + parameter of the HTTP upstream server. 
+ route: + type: string + description: Same as the + route + parameter of the HTTP upstream server. + backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the HTTP upstream server. + drain: + type: boolean + description: Same as the + drain + parameter of the HTTP upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. + readOnly: true + example: + id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPKeyvalZonesMap: + title: HTTP Keyval Shared Memory Zones + description: | + Contents of all HTTP keyval shared memory zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPKeyvalZone' + example: + keyval_zone: + key1: value1 + key2: value2 + key3: value3 + one: + arg1: value1 + arg2: value2 + arg3: value3 + NginxHTTPKeyvalZone: + title: HTTP Keyval Shared Memory Zone + description: | + Contents of an HTTP keyval shared memory zone + when using the GET method. + type: object + example: + key1: value1 + key2: value2 + key3: value3 + NginxHTTPKeyvalZonePostPatch: + title: HTTP Keyval Shared Memory Zone + description: | + Contents of an HTTP keyval shared memory zone + when using the POST or PATCH methods. + type: object + example: + key1: value1 + key2: value2 + key3: + value: value3 + expire: 30000 + NginxStreamServerZonesMap: + title: Stream Server Zones + description: | + Status information for all stream + status zones. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxStreamServerZone' + example: + mysql-frontend: + processing: 2 + connections: 270925 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 270925 + discarded: 0 + received: 28988975 + sent: 3879346317 + dns: + processing: 1 + connections: 155569 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 155569 + discarded: 0 + received: 4200363 + sent: 20489184 + NginxStreamServerZone: + title: Stream Server Zone + type: object + properties: + processing: + type: integer + description: The number of client connections + that are currently being processed. + connections: + type: integer + description: The total number of connections accepted from clients. + sessions: + type: object + description: The total number of completed sessions, + and the number of sessions completed with status codes + “2xx”, “4xx”, or “5xx”. + properties: + 2xx: + type: integer + description: The total number of sessions completed with + status codes + “2xx”. + 4xx: + type: integer + description: The total number of sessions completed with + status codes + “4xx”. + 5xx: + type: integer + description: The total number of sessions completed with + status codes + “5xx”. + total: + type: integer + description: The total number of completed client sessions. + discarded: + type: integer + description: The total number of + connections completed without creating a session. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + dns: + processing: 1 + connections: 155569 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 155569 + discarded: 0 + received: 4200363 + sent: 20489184 + NginxStreamLimitConnZonesMap: + title: Stream limit_conns + description: | + Status data for all stream + limit_conn zones. 
+ type: object
+ additionalProperties:
+ $ref: '#/definitions/NginxStreamLimitConnZone'
+ NginxStreamLimitConnZone:
+ title: Stream Connections Limiting
+ type: object
+ properties:
+ passed:
+ type: integer
+ description: The total number of connections that were neither limited
+ nor accounted as limited.
+ rejected:
+ type: integer
+ description: The total number of connections that were rejected.
+ rejected_dry_run:
+ type: integer
+ description: The total number of connections accounted as rejected in the
+ dry run
+ mode.
+ example:
+ passed: 15
+ rejected: 0
+ rejected_dry_run: 2
+ NginxStreamUpstreamMap:
+ title: Stream Upstreams
+ description: Status information of stream upstream server groups.
+ type: object
+ additionalProperties:
+ $ref: '#/definitions/NginxStreamUpstream'
+ example:
+ mysql_backends:
+ peers:
+ - id: 0
+ server: 10.0.0.1:12345
+ name: 10.0.0.1:12345
+ backup: false
+ weight: 5
+ state: up
+ active: 0
+ max_conns: 30
+ connections: 1231
+ sent: 251946292
+ received: 19222475454
+ fails: 0
+ unavail: 0
+ health_checks:
+ checks: 26214
+ fails: 0
+ unhealthy: 0
+ last_passed: true
+ downtime: 0
+ downstart: 2019-10-01T11:09:21.602Z
+ selected: 2019-10-01T15:01:25Z
+ - id: 1
+ server: 10.0.0.1:12346
+ name: 10.0.0.1:12346
+ backup: true
+ weight: 1
+ state: unhealthy
+ active: 0
+ max_conns: 30
+ connections: 0
+ sent: 0
+ received: 0
+ fails: 0
+ unavail: 0
+ health_checks:
+ checks: 26284
+ fails: 26284
+ unhealthy: 1
+ last_passed: false
+ downtime: 262925617
+ downstart: 2019-10-01T11:09:21.602Z
+ selected: 2019-10-01T15:01:25Z
+ zombies: 0
+ zone: mysql_backends
+ dns:
+ peers:
+ - id: 0
+ server: 10.0.0.1:12347
+ name: 10.0.0.1:12347
+ backup: false
+ weight: 5
+ state: up
+ active: 0
+ max_conns: 30
+ connections: 667231
+ sent: 251946292
+ received: 19222475454
+ fails: 0
+ unavail: 0
+ health_checks:
+ checks: 26214
+ fails: 0
+ unhealthy: 0
+ last_passed: true
+ downtime: 0
+ downstart: 2019-10-01T11:09:21.602Z
+ selected: 
2019-10-01T15:01:25Z + - id: 1 + server: 10.0.0.1:12348 + name: 10.0.0.1:12348 + backup: true + weight: 1 + state: unhealthy + active: 0 + connections: 0 + max_conns: 30 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + zombies: 0 + zone: dns + NginxStreamUpstream: + title: Stream Upstream + type: object + properties: + peers: + $ref: '#/definitions/NginxStreamUpstreamPeerMap' + zombies: + type: integer + description: The current number of servers removed from the group + but still processing active client connections. + zone: + type: string + description: The name of the shared memory + zone + that keeps the group’s configuration and run-time state. + example: + dns: + peers: + - id: 0 + server: 10.0.0.1:12347 + name: 10.0.0.1:12347 + backup: false + weight: 5 + state: up + active: 0 + max_conns: 50 + connections: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + - id: 1 + server: 10.0.0.1:12348 + name: 10.0.0.1:12348 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 50 + connections: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2019-10-01T11:09:21.602Z + selected: 2019-10-01T15:01:25Z + zombies: 0 + zone: dns + NginxStreamUpstreamPeerMap: + title: Stream Upstream Servers + description: Array of stream upstream servers. + type: array + items: + $ref: '#/definitions/NginxStreamUpstreamPeer' + NginxStreamUpstreamPeer: + title: Stream Upstream Server + type: object + properties: + id: + type: integer + description: The ID of the server. 
+ readOnly: true + server: + type: string + description: An + address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. + name: + type: string + format: hostname + description: The name of the server specified in the + server + directive. + readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. + state: + type: string + readOnly: true + enum: + - up + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “down”, “unavail”, + “checking”, or “unhealthy”. + active: + type: integer + description: The current number of connections. + readOnly: true + max_conns: + type: integer + description: The + max_conns + limit for the server. + connections: + type: integer + description: The total number of client connections forwarded to this server. + readOnly: true + connect_time: + type: integer + description: The average time to connect to the upstream server. + readOnly: true + first_byte_time: + type: integer + description: The average time to receive the first byte of data. + readOnly: true + response_time: + type: integer + description: The average time to receive the last byte of data. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. + readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client connections + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. 
+ readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. + readOnly: true + fails: + type: integer + description: The number of failed health checks. + readOnly: true + unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). + readOnly: true + last_passed: + type: boolean + description: Boolean indicating whether the last health check request + was successful and passed + tests. + readOnly: true + downtime: + type: integer + description: Total time the server was in the + “unavail”, “checking”, + and “unhealthy” states. + readOnly: true + downstart: + type: string + format: date-time + description: The time when the server became + “unavail”, “checking”, + or “unhealthy”, + in the ISO 8601 format with millisecond resolution. + readOnly: true + selected: + type: string + format: date-time + description: The time when the server was last selected + to process a connection, + in the ISO 8601 format with millisecond resolution. + readOnly: true + NginxStreamUpstreamConfServerMap: + title: Stream Upstream Servers + description: | + An array of stream upstream servers for dynamic configuration. + type: array + items: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + - id: 1 + server: 10.0.0.1:12349 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxStreamUpstreamConfServer: + title: Stream Upstream Server + description: | + Dynamically configurable parameters of a stream upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the stream upstream server. + The ID is assigned automatically and cannot be changed. 
+ readOnly: true + server: + type: string + description: Same as the + address + parameter of the stream upstream server. + When adding a server, it is possible to specify it as a domain name. + In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “stream” block. + See also the + resolve + parameter of the stream upstream server. + service: + type: string + description: Same as the + service + parameter of the stream upstream server. + This parameter cannot be changed. + readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the stream upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the stream upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the stream upstream server. + fail_timeout: + type: string + description: Same as the + fail_timeout + parameter of the stream upstream server. + slow_start: + type: string + description: Same as the + slow_start + parameter of the stream upstream server. + backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the stream upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. 
+ readOnly: true + example: + id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxStreamKeyvalZonesMap: + title: Stream Keyval Shared Memory Zones + description: | + Contents of all stream keyval shared memory zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxStreamKeyvalZone' + example: + keyval_zone: + key1: value1 + key2: value2 + key3: value3 + one: + arg1: value1 + arg2: value2 + arg3: value3 + NginxStreamKeyvalZone: + title: Stream Keyval Shared Memory Zone + description: | + Contents of a stream keyval shared memory zone when using the GET method. + type: object + example: + key1: value1 + key2: value2 + key3: value3 + NginxStreamKeyvalZonePostPatch: + title: Stream Keyval Shared Memory Zone + description: | + Contents of a stream keyval shared memory zone + when using the POST or PATCH methods. + type: object + example: + key1: value1 + key2: value2 + key3: + value: value3 + expire: 30000 + NginxStreamZoneSync: + title: Stream Zone Sync Node + type: object + properties: + zones: + type: object + title: Zone Sync Zones + description: Synchronization information per each shared memory zone. + additionalProperties: + $ref: '#/definitions/NginxStreamZoneSyncZone' + status: + type: object + description: Synchronization information per node in a cluster. + properties: + bytes_in: + type: integer + description: The number of bytes received by this node. + msgs_in: + type: integer + description: The number of messages received by this node. + msgs_out: + type: integer + description: The number of messages sent by this node. + bytes_out: + type: integer + description: The number of bytes sent by this node. + nodes_online: + type: integer + description: The number of peers this node is connected to. 
+ example: + zones: + zone1: + records_pending: 2061 + records_total: 260575 + zone2: + records_pending: 0 + records_total: 14749 + status: + bytes_in: 1364923761 + msgs_in: 337236 + msgs_out: 346717 + bytes_out: 1402765472 + nodes_online: 15 + NginxStreamZoneSyncZone: + title: Sync Zone + description: Synchronization status of a shared memory zone. + type: object + properties: + records_pending: + type: integer + description: The number of records that need to be sent to the cluster. + records_total: + type: integer + description: The total number of records stored in the shared memory zone. + NginxResolverZonesMap: + title: Resolver Zones + description: | + Status data for all + resolver zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxResolverZone' + example: + resolver_zone1: + requests: + name: 25460 + srv: 130 + addr: 2580 + responses: + noerror: 26499 + formerr: 0 + servfail: 3 + nxdomain: 0 + notimp: 0 + refused: 0 + timedout: 243 + unknown: 478 + resolver_zone2: + requests: + name: 325460 + srv: 1130 + addr: 12580 + responses: + noerror: 226499 + formerr: 0 + servfail: 283 + nxdomain: 0 + notimp: 0 + refused: 0 + timedout: 743 + unknown: 1478 + NginxResolverZone: + title: Resolver Zone + description: | + Statistics of DNS requests and responses per particular + resolver zone. + type: object + properties: + requests: + type: object + readOnly: true + properties: + name: + type: integer + description: The total number of requests + to resolve names to addresses. + readOnly: true + srv: + type: integer + description: The total number of requests + to resolve SRV records. + readOnly: true + addr: + type: integer + description: The total number of requests + to resolve addresses to names. + readOnly: true + responses: + type: object + readOnly: true + properties: + noerror: + type: integer + description: The total number of successful responses. 
+ readOnly: true + formerr: + type: integer + description: The total number of + FORMERR (Format error) responses. + readOnly: true + servfail: + type: integer + description: The total number of + SERVFAIL (Server failure) responses. + readOnly: true + nxdomain: + type: integer + description: The total number of + NXDOMAIN (Host not found) responses. + readOnly: true + notimp: + type: integer + description: The total number of + NOTIMP (Unimplemented) responses. + readOnly: true + refused: + type: integer + description: The total number of + REFUSED (Operation refused) responses. + readOnly: true + timedout: + type: integer + description: The total number of timed out requests. + readOnly: true + unknown: + type: integer + description: The total number of requests + completed with an unknown error. + readOnly: true + example: + resolver_zone1: + requests: + name: 25460 + srv: 130 + addr: 2580 + responses: + noerror: 26499 + formerr: 0 + servfail: 3 + nxdomain: 0 + notimp: 0 + refused: 0 + timedout: 243 + unknown: 478 + NginxError: + title: Error + description: | + nginx error object. + type: object + properties: + error: + type: object + properties: + status: + type: integer + description: HTTP error code. + text: + type: string + description: Error description. + code: + type: string + description: Internal nginx error code. + request_id: + type: string + description: The ID of the request, equals the value of the + $request_id + variable. + href: + type: string + description: Link to reference documentation. 
diff --git a/content/nginx/admin-guide/yaml/v8/nginx_api.yaml b/content/nginx/admin-guide/yaml/v8/nginx_api.yaml new file mode 100644 index 000000000..4641b0f20 --- /dev/null +++ b/content/nginx/admin-guide/yaml/v8/nginx_api.yaml @@ -0,0 +1,4650 @@ +swagger: '2.0' +info: + version: '8.0' + title: NGINX Plus REST API + description: NGINX Plus REST + [API](https://nginx.org/en/docs/http/ngx_http_api_module.html) + provides access to NGINX Plus status information, + on-the-fly configuration of upstream servers and + key-value pairs management for + [http](https://nginx.org/en/docs/http/ngx_http_keyval_module.html) and + [stream](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html). +basePath: /api/8 +tags: + - name: General Info + - name: Processes + - name: Connections + - name: Slabs + - name: Resolvers + - name: SSL + - name: HTTP + - name: HTTP Requests + - name: HTTP Server Zones + - name: HTTP Location Zones + - name: HTTP Caches + - name: HTTP Limit Conns + - name: HTTP Limit Reqs + - name: HTTP Keyvals + - name: HTTP Upstreams + - name: Stream + - name: Stream Server Zones + - name: Stream Limit Conns + - name: Stream Keyvals + - name: Stream Upstreams + - name: Stream Zone Sync + - name: Method GET + - name: Method POST + - name: Method PATCH + - name: Method DELETE +schemes: + - http + - https +paths: + /: + get: + tags: + - General Info + - Method GET + summary: Return list of root endpoints + description: Returns a list of root endpoints. + operationId: getAPIEndpoints + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /nginx: + get: + tags: + - General Info + - Method GET + summary: Return status of nginx running instance + description: Returns nginx version, build name, address, + number of configuration reloads, IDs of master and worker processes. 
+ operationId: getNginx + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of nginx running instance will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxObject' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /processes: + get: + tags: + - Processes + - Method GET + summary: Return nginx processes status + description: Returns the number of abnormally terminated + and respawned child processes. + operationId: getProcesses + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxProcesses' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Processes + - Method DELETE + summary: Reset nginx processes statistics + description: Resets counters of abnormally terminated and respawned + child processes. + operationId: deleteProcesses + responses: + '204': + description: Success + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /connections: + get: + tags: + - Connections + - Method GET + summary: Return client connections statistics + description: Returns statistics of client connections. + operationId: getConnections + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxConnections' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the connections statistics will be output. 
+ delete: + tags: + - Connections + - Method DELETE + summary: Reset client connections statistics + description: Resets statistics of accepted and dropped + client connections. + operationId: deleteConnections + responses: + '204': + description: Success + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /slabs/: + get: + tags: + - Slabs + - Method GET + summary: Return status of all slabs + description: Returns status of slabs + for each shared memory zone with slab allocator. + operationId: getSlabs + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of slab zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZoneMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/slabs/{slabZoneName}': + parameters: + - name: slabZoneName + in: path + description: The name of the shared memory zone with slab allocator. + required: true + type: string + get: + tags: + - Slabs + - Method GET + summary: Return status of a slab + description: Returns status of slabs for a particular shared memory zone + with slab allocator. + operationId: getSlabZone + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the slab zone will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZone' + '404': + description: | + Slab not found (*SlabNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Slabs + - Method DELETE + summary: Reset slab statistics + description: Resets the “reqs” and “fails” + metrics for each memory slot. + operationId: deleteSlabZoneStats + responses: + '204': + description: Success + '404': + description: | + Slab not found (*SlabNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/: + get: + tags: + - HTTP + - Method GET + summary: Return list of HTTP-related endpoints + description: Returns a list of first level HTTP endpoints. + operationId: getHttp + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /http/requests: + get: + tags: + - HTTP Requests + - Method GET + summary: Return HTTP requests statistics + description: Returns status of client HTTP requests. + operationId: getHttpRequests + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of client HTTP requests statistics + will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPRequests' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Requests + - Method DELETE + summary: Reset HTTP requests statistics + description: Resets the number of total client HTTP requests. 
+ operationId: deleteHttpRequests + responses: + '204': + description: Success + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/server_zones/: + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of all HTTP server zones + description: Returns status information for each HTTP + [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone). + operationId: getHttpServerZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/server_zones/{httpServerZoneName}': + parameters: + - name: httpServerZoneName + in: path + description: The name of an HTTP server zone. + type: string + required: true + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of an HTTP server zone + description: Returns status of a particular HTTP server zone. + operationId: getHttpServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZone' + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Server Zones + - Method DELETE + summary: Reset statistics for an HTTP server zone + description: Resets statistics of accepted and discarded requests, responses, + received and sent bytes, counters of SSL handshakes and session reuses + in a particular HTTP server zone. + operationId: deleteHttpServerZoneStat + responses: + '204': + description: Success + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/location_zones/: + get: + tags: + - HTTP Location Zones + - Method GET + summary: Return status of all HTTP location zones + description: Returns status information for each HTTP + [location zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone_location). + operationId: getHttpLocationZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of location zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLocationZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/location_zones/{httpLocationZoneName}': + parameters: + - name: httpLocationZoneName + in: path + description: The name of an HTTP [location zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone_location). 
+ type: string + required: true + get: + tags: + - HTTP Location Zones + - Method GET + summary: Return status of an HTTP location zone + description: Returns status of a particular + HTTP [location zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone_location). + operationId: getHttpLocationZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the location zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLocationZone' + '404': + description: | + Location zone not found (*LocationZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Location Zones + - Method DELETE + summary: Reset statistics for a location zone. + description: Resets statistics of accepted and discarded requests, responses, + received and sent bytes in a particular location zone. + operationId: deleteHttpLocationZoneStat + responses: + '204': + description: Success + '404': + description: | + Location zone not found (*LocationZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/caches/: + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of all caches + description: Returns status of each cache configured by + [proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) + and other “*_cache_path” directives. + operationId: getHttpCaches + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of cache zones will be output. + If the “fields” value is empty, + then only names of cache zones will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCachesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/caches/{httpCacheZoneName}': + parameters: + - name: httpCacheZoneName + in: path + description: The name of the cache zone. + type: string + required: true + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of a cache + description: Returns status of a particular cache. + operationId: getHttpCacheZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the cache zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCache' + '404': + description: | + Cache not found (*CacheNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Caches + - Method DELETE + summary: Reset cache statistics + description: Resets statistics of cache hits/misses in a particular cache zone. + operationId: deleteHttpCacheZoneStat + responses: + '204': + description: Success + '404': + description: | + Cache not found (*CacheNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/limit_conns/: + get: + tags: + - HTTP Limit Conns + - Method GET + summary: Return status of all HTTP limit_conn zones + description: Returns status information for each HTTP + [limit_conn zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone). + operationId: getHttpLimitConnZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of limit_conn zones will be output. + If the “fields” value is empty, + then only zone names will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLimitConnZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/limit_conns/{httpLimitConnZoneName}': + parameters: + - name: httpLimitConnZoneName + in: path + description: The name of a + [limit_conn zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone). + type: string + required: true + get: + tags: + - HTTP Limit Conns + - Method GET + summary: Return status of an HTTP limit_conn zone + description: Returns status of a particular HTTP + [limit_conn zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone). + operationId: getHttpLimitConnZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the + [limit_conn zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone) + will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLimitConnZone' + '404': + description: | + limit_conn not found (*LimitConnNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Limit Conns + - Method DELETE + summary: Reset statistics for an HTTP limit_conn zone + description: Resets the connection limiting statistics. 
+ operationId: deleteHttpLimitConnZoneStat + responses: + '204': + description: Success + '404': + description: | + limit_conn not found (*LimitConnNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/limit_reqs/: + get: + tags: + - HTTP Limit Reqs + - Method GET + summary: Return status of all HTTP limit_req zones + description: Returns status information for each HTTP + [limit_req zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone). + operationId: getHttpLimitReqZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of limit_req zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLimitReqZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/limit_reqs/{httpLimitReqZoneName}': + parameters: + - name: httpLimitReqZoneName + in: path + description: The name of a + [limit_req zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone). + type: string + required: true + get: + tags: + - HTTP Limit Reqs + - Method GET + summary: Return status of an HTTP limit_req zone + description: Returns status of a particular HTTP + [limit_req zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone). + operationId: getHttpLimitReqZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the + [limit_req zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone) + will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLimitReqZone' + '404': + description: | + limit_req not found (*LimitReqNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Limit Reqs + - Method DELETE + summary: Reset statistics for an HTTP limit_req zone + description: Resets the requests limiting statistics. + operationId: deleteHttpLimitReqZoneStat + responses: + '204': + description: Success + '404': + description: | + limit_req not found (*LimitReqNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/upstreams/: + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of all HTTP upstream server groups + description: Returns status of each HTTP upstream server group + and its servers. + operationId: getHttpUpstreams + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of upstream server groups will be output. + If the “fields” value is empty, + only names of upstreams will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an HTTP upstream server group. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of an HTTP upstream server group + description: Returns status of a particular HTTP upstream server group + and its servers. 
+ operationId: getHttpUpstreamName + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Upstreams + - Method DELETE + summary: Reset statistics of an HTTP upstream server group + description: Resets the statistics for each upstream server + in an upstream server group and queue statistics. + operationId: deleteHttpUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/servers/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an upstream server group. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of all servers in an HTTP upstream server group + description: Returns configuration of each server + in a particular HTTP upstream server group. 
+ operationId: getHttpUpstreamServers
+ produces:
+ - application/json
+ responses:
+ '200':
+ description: Success
+ schema:
+ $ref: '#/definitions/NginxHTTPUpstreamConfServerMap'
+ '400':
+ description: Upstream is static (*UpstreamStatic*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Unknown version (*UnknownVersion*),
+ upstream not found (*UpstreamNotFound*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ post:
+ tags:
+ - HTTP Upstreams
+ - Method POST
+ summary: Add a server to an HTTP upstream server group
+ description: Adds a new server to an HTTP upstream server group.
+ Server parameters are specified in the JSON format.
+ operationId: postHttpUpstreamServer
+ produces:
+ - application/json
+ parameters:
+ - in: body
+ name: postHttpUpstreamServer
+ description: Address of a new server and other optional parameters
+ in the JSON format.
+ The “*ID*”, “*backup*”, and “*service*” parameters
+ cannot be changed.
+ required: true
+ schema:
+ $ref: '#/definitions/NginxHTTPUpstreamConfServer'
+ responses:
+ '201':
+ description: Created
+ schema:
+ $ref: '#/definitions/NginxHTTPUpstreamConfServer'
+ '400':
+ description: |
+ Upstream is static (*UpstreamStatic*),
+ invalid “**parameter**” value (*UpstreamConfFormatError*),
+ missing “*server*” argument (*UpstreamConfFormatError*),
+ unknown parameter “**name**” (*UpstreamConfFormatError*),
+ nested object or list (*UpstreamConfFormatError*),
+ “*error*” while parsing (*UpstreamBadAddress*),
+ service upstream “*host*” may not have port (*UpstreamBadAddress*),
+ service upstream “*host*” requires domain name (*UpstreamBadAddress*),
+ invalid “*weight*” (*UpstreamBadWeight*),
+ invalid “*max_conns*” (*UpstreamBadMaxConns*),
+ invalid “*max_fails*” (*UpstreamBadMaxFails*),
+ invalid “*fail_timeout*” (*UpstreamBadFailTimeout*),
+ invalid “*slow_start*” (*UpstreamBadSlowStart*),
+ reading request body failed (*BodyReadError*),
+ route is too long (*UpstreamBadRoute*),
+ “*service*” is
empty (*UpstreamBadService*), + no resolver defined to resolve (*UpstreamConfNoResolver*), + upstream “**name**” has no backup (*UpstreamNoBackup*), + upstream “**name**” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '409': + description: Entry exists (*EntryExists*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/servers/{httpUpstreamServerId}': + parameters: + - name: httpUpstreamName + in: path + description: The name of the upstream server group. + required: true + type: string + - name: httpUpstreamServerId + in: path + description: The ID of the server. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of a server in an HTTP upstream server group + description: Returns configuration of a particular server + in the HTTP upstream server group. 
+ operationId: getHttpUpstreamPeer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Server with ID “**id**” does not exist (*UpstreamServerNotFound*), + unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - HTTP Upstreams + - Method PATCH + summary: Modify a server in an HTTP upstream server group + description: Modifies settings of a particular server + in an HTTP upstream server group. + Server parameters are specified in the JSON format. + operationId: patchHttpUpstreamPeer + produces: + - application/json + parameters: + - in: body + name: patchHttpUpstreamServer + description: Server parameters, specified in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxHTTPUpstreamConfServer'
+ responses:
+ '200':
+ description: Success
+ schema:
+ $ref: '#/definitions/NginxHTTPUpstreamConfServer'
+ '400':
+ description: |
+ Upstream is static (*UpstreamStatic*),
+ invalid “**parameter**” value (*UpstreamConfFormatError*),
+ unknown parameter “**name**” (*UpstreamConfFormatError*),
+ nested object or list (*UpstreamConfFormatError*),
+ “*error*” while parsing (*UpstreamBadAddress*),
+ invalid “*server*” argument (*UpstreamBadAddress*),
+ invalid server ID (*UpstreamBadServerId*),
+ invalid “*weight*” (*UpstreamBadWeight*),
+ invalid “*max_conns*” (*UpstreamBadMaxConns*),
+ invalid “*max_fails*” (*UpstreamBadMaxFails*),
+ invalid “*fail_timeout*” (*UpstreamBadFailTimeout*),
+ invalid “*slow_start*” (*UpstreamBadSlowStart*),
+ reading request body failed (*BodyReadError*),
+ route is too long (*UpstreamBadRoute*),
+ “*service*” is empty (*UpstreamBadService*),
+ server “**ID**” address is immutable (*UpstreamServerImmutable*),
+ server “**ID**” weight is immutable (*UpstreamServerWeightImmutable*),
+ upstream “*name*” memory exhausted (*UpstreamOutOfMemory*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Server with ID “**id**” does not exist (*UpstreamServerNotFound*),
+ unknown version (*UnknownVersion*),
+ upstream not found (*UpstreamNotFound*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - HTTP Upstreams
+ - Method DELETE
+ summary: Remove a server from an HTTP upstream server group
+ description: Removes a server from an HTTP upstream server group.
+ operationId: deleteHttpUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Server with ID “**id**” does not exist (*UpstreamServerNotFound*), + unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/keyvals/: + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from all HTTP keyval zones + description: Returns key-value pairs for each HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + operationId: getHttpKeyvalZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: If the “fields” value is empty, + then only HTTP keyval zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/keyvals/{httpKeyvalZoneName}': + parameters: + - name: httpKeyvalZoneName + in: path + description: The name of an HTTP keyval shared memory zone. + required: true + type: string + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from an HTTP keyval zone + description: Returns key-value pairs stored in a particular HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). 
+ operationId: getHttpKeyvalZoneKeysValues + produces: + - application/json + parameters: + - name: key + in: query + type: string + description: Get a particular key-value pair from the HTTP keyval zone. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZone' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - HTTP Keyvals + - Method POST + summary: Add a key-value pair to the HTTP keyval zone + description: Adds a new key-value pair to the HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + Several key-value pairs can be entered + if the HTTP keyval shared memory zone is empty. + operationId: postHttpKeyvalZoneData + produces: + - application/json + parameters: + - in: body + name: Key-value + description: A key-value pair is specified in the JSON format. + Several key-value pairs can be entered + if the HTTP keyval shared memory zone is empty. + Expiration time in milliseconds can be specified for a key-value pair + with the *expire* parameter + which overrides the [*timeout*](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_timeout) + parameter of the + keyval_zone + directive. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxHTTPKeyvalZonePostPatch'
+ responses:
+ '201':
+ description: Created
+ '400':
+ description: |
+ Invalid JSON (*KeyvalFormatError*),
+ invalid key format (*KeyvalFormatError*),
+ key required (*KeyvalFormatError*),
+ keyval timeout is not enabled (*KeyvalFormatError*),
+ only one key can be added (*KeyvalFormatError*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '409':
+ description: |
+ Entry exists (*EntryExists*),
+ key already exists (*KeyvalKeyExists*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '413':
+ description: Request Entity Too Large
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ patch:
+ tags:
+ - HTTP Keyvals
+ - Method PATCH
+ summary: Modify a key-value or delete a key
+ description: Changes the value of the selected key in the key-value pair,
+ deletes a key by setting the key value to null,
+ changes expiration time of a key-value pair.
+ If
+ synchronization
+ of keyval zones in a cluster is enabled,
+ deletes a key only on a target cluster node.
+ Expiration time in milliseconds can be specified for a key-value pair
+ with the *expire* parameter
+ which overrides the [*timeout*](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_timeout)
+ parameter of the
+ keyval_zone
+ directive.
+ operationId: patchHttpKeyvalZoneKeyValue
+ produces:
+ - application/json
+ parameters:
+ - in: body
+ name: httpKeyvalZoneKeyValue
+ description: A new value for the key is specified in the JSON format.
+ required: true
+ schema:
+ $ref: '#/definitions/NginxHTTPKeyvalZonePostPatch'
+ responses:
+ '204':
+ description: Success
+ '400':
+ description: |
+ Invalid JSON (*KeyvalFormatError*),
+ key required (*KeyvalFormatError*),
+ keyval timeout is not enabled (*KeyvalFormatError*),
+ only one key can be updated (*KeyvalFormatError*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ keyval key not found (*KeyvalKeyNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '413':
+ description: Request Entity Too Large
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - HTTP Keyvals
+ - Method DELETE
+ summary: Empty the HTTP keyval zone
+ description: Deletes all key-value pairs from the HTTP keyval shared memory
+ [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone).
+ If
+ synchronization
+ of keyval zones in a cluster is enabled,
+ empties the keyval zone only on a target cluster node.
+ operationId: deleteHttpKeyvalZoneData
+ responses:
+ '204':
+ description: Success
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ /stream/:
+ get:
+ tags:
+ - Stream
+ summary: Return list of stream-related endpoints
+ description: Returns a list of first level stream endpoints.
+ operationId: getStream + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /stream/server_zones/: + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of all stream server zones + description: Returns status information for each stream + [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone). + operationId: getStreamServerZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/stream/server_zones/{streamServerZoneName}': + parameters: + - name: streamServerZoneName + in: path + description: The name of a stream server zone. + type: string + required: true + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of a stream server zone + description: Returns status of a particular stream server zone. + operationId: getStreamServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZone' + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Server Zones + - Method DELETE + summary: Reset statistics for a stream server zone + description: Resets statistics of accepted and discarded connections, sessions, + received and sent bytes, counters of SSL handshakes and session reuses + in a particular stream server zone. + operationId: deleteStreamServerZoneStat + responses: + '204': + description: Success + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/limit_conns/: + get: + tags: + - Stream Limit Conns + - Method GET + summary: Return status of all stream limit_conn zones + description: Returns status information for each stream + [limit_conn zone](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone). + operationId: getStreamLimitConnZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of limit_conn zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamLimitConnZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/stream/limit_conns/{streamLimitConnZoneName}': + parameters: + - name: streamLimitConnZoneName + in: path + description: The name of a + [limit_conn zone](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone). 
+ type: string
+ required: true
+ get:
+ tags:
+ - Stream Limit Conns
+ - Method GET
+ summary: Return status of a stream limit_conn zone
+ description: Returns status of a particular stream
+ [limit_conn zone](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone).
+ operationId: getStreamLimitConnZone
+ produces:
+ - application/json
+ parameters:
+ - name: fields
+ in: query
+ type: string
+ description: Limits which fields of the
+ [limit_conn zone](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone)
+ will be output.
+ responses:
+ '200':
+ description: Success
+ schema:
+ $ref: '#/definitions/NginxStreamLimitConnZone'
+ '404':
+ description: |
+ limit_conn not found (*LimitConnNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - Stream Limit Conns
+ - Method DELETE
+ summary: Reset statistics for a stream limit_conn zone
+ description: Resets the connection limiting statistics.
+ operationId: deleteStreamLimitConnZoneStat
+ responses:
+ '204':
+ description: Success
+ '404':
+ description: |
+ limit_conn not found (*LimitConnNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ /stream/upstreams/:
+ get:
+ tags:
+ - Stream Upstreams
+ - Method GET
+ summary: Return status of all stream upstream server groups
+ description: Returns status of each stream upstream server group
+ and its servers.
+ operationId: getStreamUpstreams
+ produces:
+ - application/json
+ parameters:
+ - name: fields
+ in: query
+ type: string
+ description: Limits which fields of upstream server groups will be output.
+ If the “fields” value is empty,
+ only names of upstreams will be output.
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/': + parameters: + - name: streamUpstreamName + in: path + description: The name of a stream upstream server group. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return status of a stream upstream server group + description: Returns status of a particular stream upstream server group + and its servers. + operationId: getStreamUpstream + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Upstreams + - Method DELETE + summary: Reset statistics of a stream upstream server group + description: Resets the statistics for each upstream server + in an upstream server group. 
+ operationId: deleteStreamUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/servers/': + parameters: + - name: streamUpstreamName + in: path + description: The name of an upstream server group. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of all servers in a stream upstream server group + description: Returns configuration of each server + in a particular stream upstream server group. + operationId: getStreamUpstreamServers + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - Stream Upstreams + - Method POST + summary: Add a server to a stream upstream server group + description: Adds a new server to a stream upstream server group. + Server parameters are specified in the JSON format. + operationId: postStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: postStreamUpstreamServer + description: Address of a new server and other optional parameters + in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxStreamUpstreamConfServer'
+ responses:
+ '201':
+ description: Created
+ schema:
+ $ref: '#/definitions/NginxStreamUpstreamConfServer'
+ '400':
+ description: |
+ Upstream is static (*UpstreamStatic*),
+ invalid “**parameter**” value (*UpstreamConfFormatError*),
+ missing “*server*” argument (*UpstreamConfFormatError*),
+ unknown parameter “**name**” (*UpstreamConfFormatError*),
+ nested object or list (*UpstreamConfFormatError*),
+ “*error*” while parsing (*UpstreamBadAddress*),
+ no port in server “*host*” (*UpstreamBadAddress*),
+ service upstream “*host*” may not have port (*UpstreamBadAddress*),
+ service upstream “*host*” requires domain name (*UpstreamBadAddress*),
+ invalid “*weight*” (*UpstreamBadWeight*),
+ invalid “*max_conns*” (*UpstreamBadMaxConns*),
+ invalid “*max_fails*” (*UpstreamBadMaxFails*),
+ invalid “*fail_timeout*” (*UpstreamBadFailTimeout*),
+ invalid “*slow_start*” (*UpstreamBadSlowStart*),
+ “*service*” is empty (*UpstreamBadService*),
+ no resolver defined to resolve (*UpstreamConfNoResolver*),
+ upstream “**name**” has no backup (*UpstreamNoBackup*),
+ upstream “**name**” memory exhausted (*UpstreamOutOfMemory*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Unknown version (*UnknownVersion*),
+ upstream not found (*UpstreamNotFound*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '409':
+ description: Entry exists (*EntryExists*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '/stream/upstreams/{streamUpstreamName}/servers/{streamUpstreamServerId}':
+ parameters:
+ - name: streamUpstreamName
+ in: path
+ description: The name of the upstream server group.
+ required: true + type: string + - name: streamUpstreamServerId + in: path + description: The ID of the server. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of a server in a stream upstream server group + description: Returns configuration of a particular server + in the stream upstream server group. + operationId: getStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - Stream Upstreams + - Method PATCH + summary: Modify a server in a stream upstream server group + description: Modifies settings of a particular server + in a stream upstream server group. + Server parameters are specified in the JSON format. + operationId: patchStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: patchStreamUpstreamServer + description: Server parameters, specified in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxStreamUpstreamConfServer'
+ responses:
+ '200':
+ description: Success
+ schema:
+ $ref: '#/definitions/NginxStreamUpstreamConfServer'
+ '400':
+ description: |
+ Upstream is static (*UpstreamStatic*),
+ invalid “**parameter**” value (*UpstreamConfFormatError*),
+ unknown parameter “**name**” (*UpstreamConfFormatError*),
+ nested object or list (*UpstreamConfFormatError*),
+ “*error*” while parsing (*UpstreamBadAddress*),
+ invalid “*server*” argument (*UpstreamBadAddress*),
+ no port in server “*host*” (*UpstreamBadAddress*),
+ invalid server ID (*UpstreamBadServerId*),
+ invalid “*weight*” (*UpstreamBadWeight*),
+ invalid “*max_conns*” (*UpstreamBadMaxConns*),
+ invalid “*max_fails*” (*UpstreamBadMaxFails*),
+ invalid “*fail_timeout*” (*UpstreamBadFailTimeout*),
+ invalid “*slow_start*” (*UpstreamBadSlowStart*),
+ reading request body failed (*BodyReadError*),
+ “*service*” is empty (*UpstreamBadService*),
+ server “**ID**” address is immutable (*UpstreamServerImmutable*),
+ server “**ID**” weight is immutable (*UpstreamServerWeightImmutable*),
+ upstream “*name*” memory exhausted (*UpstreamOutOfMemory*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Server with ID “**id**” does not exist (*UpstreamServerNotFound*),
+ unknown version (*UnknownVersion*),
+ upstream not found (*UpstreamNotFound*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - Stream Upstreams
+ - Method DELETE
+ summary: Remove a server from a stream upstream server group
+ description: Removes a server from a stream upstream server group.
+ operationId: deleteStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Server with ID “**id**” does not exist (*UpstreamServerNotFound*), + unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/keyvals/: + get: + tags: + - Stream Keyvals + - Method GET + summary: Return key-value pairs from all stream keyval zones + description: Returns key-value pairs for each stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + operationId: getStreamKeyvalZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: If the “fields” value is empty, + then only stream keyval zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamKeyvalZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/stream/keyvals/{streamKeyvalZoneName}': + parameters: + - name: streamKeyvalZoneName + in: path + description: The name of a stream keyval shared memory zone. + required: true + type: string + get: + tags: + - Stream Keyvals + - Method GET + summary: Return key-value pairs from a stream keyval zone + description: Returns key-value pairs stored in a particular stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). 
+ operationId: getStreamKeyvalZoneKeysValues + produces: + - application/json + parameters: + - name: key + in: query + type: string + description: Get a particular key-value pair from the stream keyval zone. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamKeyvalZone' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - Stream Keyvals + - Method POST + summary: Add a key-value pair to the stream keyval zone + description: Adds a new key-value pair to the stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + Several key-value pairs can be entered + if the stream keyval shared memory zone is empty. + operationId: postStreamKeyvalZoneData + produces: + - application/json + parameters: + - in: body + name: Key-value + description: A key-value pair is specified in the JSON format. + Several key-value pairs can be entered + if the stream keyval shared memory zone is empty. + Expiration time in milliseconds can be specified for a key-value pair + with the *expire* parameter + which overrides the [*timeout*](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_timeout) + parameter of the + keyval_zone + directive. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxStreamKeyvalZonePostPatch'
+ responses:
+ '201':
+ description: Created
+ '400':
+ description: |
+ Invalid JSON (*KeyvalFormatError*),
+ invalid key format (*KeyvalFormatError*),
+ key required (*KeyvalFormatError*),
+ keyval timeout is not enabled (*KeyvalFormatError*),
+ only one key can be added (*KeyvalFormatError*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '409':
+ description: |
+ Entry exists (*EntryExists*),
+ key already exists (*KeyvalKeyExists*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '413':
+ description: Request Entity Too Large
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ patch:
+ tags:
+ - Stream Keyvals
+ - Method PATCH
+ summary: Modify a key-value or delete a key
+ description: Changes the value of the selected key in the key-value pair,
+ deletes a key by setting the key value to null,
+ changes expiration time of a key-value pair.
+ If
+ synchronization
+ of keyval zones in a cluster is enabled,
+ deletes a key only on a target cluster node.
+ Expiration time is specified in milliseconds
+ with the *expire* parameter
+ which overrides the [*timeout*](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_timeout)
+ parameter of the
+ keyval_zone
+ directive.
+ operationId: patchStreamKeyvalZoneKeyValue
+ produces:
+ - application/json
+ parameters:
+ - in: body
+ name: streamKeyvalZoneKeyValue
+ description: A new value for the key is specified in the JSON format.
+ required: true
+ schema:
+ $ref: '#/definitions/NginxStreamKeyvalZonePostPatch'
+ responses:
+ '204':
+ description: Success
+ '400':
+ description: |
+ Invalid JSON (*KeyvalFormatError*),
+ key required (*KeyvalFormatError*),
+ keyval timeout is not enabled (*KeyvalFormatError*),
+ only one key can be updated (*KeyvalFormatError*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ keyval key not found (*KeyvalKeyNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '413':
+ description: Request Entity Too Large
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - Stream Keyvals
+ - Method DELETE
+ summary: Empty the stream keyval zone
+ description: Deletes all key-value pairs from the stream keyval shared memory
+ [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone).
+ If
+ synchronization
+ of keyval zones in a cluster is enabled,
+ empties the keyval zone only on a target cluster node.
+ operationId: deleteStreamKeyvalZoneData
+ responses:
+ '204':
+ description: Success
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ /stream/zone_sync/:
+ get:
+ tags:
+ - Stream Zone Sync
+ - Method GET
+ summary: Return sync status of a node
+ description: Returns synchronization status of a cluster node.
+ operationId: getStreamZoneSync + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamZoneSync' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /resolvers/: + get: + tags: + - Resolvers + - Method GET + summary: Return status for all resolver zones + description: Returns status information for each + [resolver zone](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver_status_zone). + operationId: getResolverZones + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxResolverZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of resolvers statistics will be output. + '/resolvers/{resolverZoneName}': + parameters: + - name: resolverZoneName + in: path + description: The name of a resolver zone. + required: true + type: string + get: + tags: + - Resolvers + - Method GET + summary: Return statistics of a resolver zone + description: Returns statistics stored in a particular resolver + [zone](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver_status_zone). + operationId: getResolverZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the resolver zone will be output + (requests, responses, or both). + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxResolverZone' + '404': + description: | + Resolver zone not found (*ResolverZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Resolvers + - Method DELETE + summary: Reset statistics for a resolver zone. + description: Resets statistics in a particular resolver zone. 
+ operationId: deleteResolverZoneStat + responses: + '204': + description: Success + '404': + description: | + Resolver zone not found (*ResolverZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /ssl: + get: + tags: + - SSL + - Method GET + summary: Return SSL statistics + description: Returns SSL statistics. + operationId: getSsl + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSSLObject' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of SSL statistics will be output. + delete: + tags: + - SSL + - Method DELETE + summary: Reset SSL statistics + description: Resets counters of SSL handshakes and session reuses. + operationId: deleteSslStat + responses: + '204': + description: Success + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' +### +###DEFINITIONS +### +definitions: + ArrayOfStrings: + title: Array + description: | + An array of strings. + type: array + items: + type: string + NginxObject: + title: nginx + description: | + General information about nginx: + type: object + properties: + version: + type: string + description: Version of nginx. + build: + type: string + description: Name of nginx build. + address: + type: string + description: The address of the server that accepted status request. + generation: + type: integer + description: The total number of configuration + reloads. 
+ load_timestamp: + type: string + format: date-time + description: Time of the last reload of configuration, + in the ISO 8601 format with millisecond resolution. + timestamp: + type: string + format: date-time + description: Current time + in the ISO 8601 format with millisecond resolution. + pid: + type: integer + description: The ID of the worker process that handled status request. + ppid: + type: integer + description: The ID of the master process that started the + worker process. + example: + nginx: + version: 1.21.6 + build: nginx-plus-r27 + address: 206.251.255.64 + generation: 6 + load_timestamp: 2022-06-28T11:15:44.467Z + timestamp: 2022-06-28T09:26:07.305Z + pid: 32212 + ppid: 32210 + NginxProcesses: + title: Processes + type: object + properties: + respawned: + type: integer + description: The total number of abnormally terminated + and respawned child processes. + example: + respawned: 0 + NginxConnections: + title: Connections + description: | + The number of accepted, dropped, active, and idle connections. + type: object + properties: + accepted: + type: integer + description: The total number of accepted client connections. + dropped: + type: integer + description: The total number of dropped client connections. + active: + type: integer + description: The current number of active client connections. + idle: + type: integer + description: The current number of idle client connections. + example: + accepted: 4968119 + dropped: 0 + active: 5 + idle: 117 + NginxSSLObject: + title: SSL + type: object + properties: + handshakes: + type: integer + description: The total number of successful SSL handshakes. + handshakes_failed: + type: integer + description: The total number of failed SSL handshakes. + session_reuses: + type: integer + description: The total number of session reuses during SSL handshake. + no_common_protocol: + type: integer + description: The number of SSL handshakes failed + because of no common protocol. 
+ no_common_cipher: + type: integer + description: The number of SSL handshakes failed + because of no shared cipher. + handshake_timeout: + type: integer + description: The number of SSL handshakes failed + because of a timeout. + peer_rejected_cert: + type: integer + description: The number of failed SSL handshakes + when nginx presented the certificate to the client + but it was rejected with a corresponding alert message. + verify_failures: + type: object + description: SSL certificate verification errors + properties: + no_cert: + type: integer + description: A client did not provide the required certificate. + expired_cert: + type: integer + description: An expired or not yet valid certificate + was presented by a client. + revoked_cert: + type: integer + description: A revoked certificate was presented by a client. + hostname_mismatch: + type: integer + description: Server's certificate doesn't match the hostname. + other: + type: integer + description: Other SSL certificate verification errors. + example: + handshakes: 79572 + handshakes_failed: 21025 + session_reuses: 15762 + no_common_protocol: 4 + no_common_cipher: 2 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + no_cert: 0 + expired_cert: 2 + revoked_cert: 1 + hostname_mismatch: 2 + other: 1 + NginxSlabZoneMap: + title: Slab Zones + description: | + Status zones that use slab allocator. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxSlabZone' + example: + http_cache: + pages: + used: 2 + free: 2452 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 1 + free: 126 + reqs: 1 + fails: 0 + 64: + used: 2 + free: 62 + reqs: 2 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + dns-backends: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZone: + title: Shared memory zone with slab allocator + description: | + Shared memory zone with slab allocator + type: object + properties: + pages: + type: object + description: The number of free and used memory pages. + properties: + used: + type: integer + description: The current number of used memory pages. + free: + type: integer + description: The current number of free memory pages. + slots: + type: object + title: Memory Slots + description: Status data for memory slots (8, 16, 32, 64, 128, etc.) + additionalProperties: + $ref: '#/definitions/NginxSlabZoneSlot' + example: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZoneSlot: + title: Memory Slot + type: object + properties: + used: + type: integer + description: The current number of used memory slots. + free: + type: integer + description: The current number of free memory slots. 
+ reqs: + type: integer + description: The total number of attempts + to allocate memory of specified size. + fails: + type: integer + description: The number of unsuccessful attempts + to allocate memory of specified size. + NginxHTTPRequests: + title: HTTP Requests + type: object + properties: + total: + type: integer + description: The total number of client requests. + current: + type: integer + description: The current number of client requests. + example: + total: 10624511 + current: 4 + NginxHTTPServerZonesMap: + title: HTTP Server Zones + description: | + Status data for all HTTP + status zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPServerZone' + example: + site1: + processing: 2 + requests: 736395 + responses: + 1xx: 0 + 2xx: 727290 + 3xx: 4614 + 4xx: 934 + 5xx: 1535 + codes: + 200: 727270 + 301: 4614 + 404: 930 + 503: 1535 + total: 734373 + discarded: 2020 + received: 180157219 + sent: 20183175459 + ssl: + handshakes: 65432 + handshakes_failed: 421 + session_reuses: 4645 + no_common_protocol: 4 + no_common_cipher: 2 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + no_cert: 0 + expired_cert: 2 + revoked_cert: 1 + hostname_mismatch: 2 + other: 1 + site2: + processing: 1 + requests: 185307 + responses: + 1xx: 0 + 2xx: 112674 + 3xx: 45383 + 4xx: 2504 + 5xx: 4419 + codes: + 200: 112674 + 301: 45383 + 404: 2504 + 503: 4419 + total: 164980 + discarded: 20326 + received: 51575327 + sent: 2983241510 + ssl: + handshakes: 104303 + handshakes_failed: 1421 + session_reuses: 54645 + no_common_protocol: 4 + no_common_cipher: 2 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + no_cert: 0 + expired_cert: 2 + revoked_cert: 1 + hostname_mismatch: 2 + other: 1 + NginxHTTPServerZone: + title: HTTP Server Zone + type: object + properties: + processing: + type: integer + description: The number of client requests + that are currently being processed. 
+ requests: + type: integer + description: The total number of client requests received from clients. + responses: + description: The total number of responses sent to clients, the + number of responses with status codes + “1xx”, “2xx”, “3xx”, + “4xx”, and “5xx”, and + the number of responses per each status code. + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. + readOnly: true + codes: + type: object + description: The number of responses per each status code. + readOnly: true + properties: + codeNumber: + type: integer + description: The number of responses with this particular status code. + readOnly: true + total: + type: integer + description: The total number of responses sent to clients. + readOnly: true + discarded: + type: integer + description: The total number of + requests completed without sending a response. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + ssl: + type: object + readOnly: true + properties: + handshakes: + type: integer + description: The total number of successful SSL handshakes. + readOnly: true + handshakes_failed: + type: integer + description: The total number of failed SSL handshakes. + readOnly: true + session_reuses: + type: integer + description: The total number of session reuses during SSL handshake. 
+ readOnly: true + no_common_protocol: + type: integer + description: The number of SSL handshakes failed + because of no common protocol. + no_common_cipher: + type: integer + description: The number of SSL handshakes failed + because of no shared cipher. + handshake_timeout: + type: integer + description: The number of SSL handshakes failed + because of a timeout. + peer_rejected_cert: + type: integer + description: The number of failed SSL handshakes + when nginx presented the certificate to the client + but it was rejected with a corresponding alert message. + verify_failures: + type: object + description: SSL certificate verification errors + properties: + no_cert: + type: integer + description: A client did not provide the required certificate. + expired_cert: + type: integer + description: An expired or not yet valid certificate + was presented by a client. + revoked_cert: + type: integer + description: A revoked certificate was presented by a client. + other: + type: integer + description: Other SSL certificate verification errors. + example: + processing: 1 + requests: 706690 + responses: + 1xx: 0 + 2xx: 699482 + 3xx: 4522 + 4xx: 907 + 5xx: 266 + codes: + 200: 699482 + 301: 4522 + 404: 907 + 503: 266 + total: 705177 + discarded: 1513 + received: 172711587 + sent: 19415530115 + ssl: + handshakes: 104303 + handshakes_failed: 1421 + session_reuses: 54645 + no_common_protocol: 4 + no_common_cipher: 2 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + no_cert: 0 + expired_cert: 2 + revoked_cert: 1 + other: 1 + NginxHTTPLocationZonesMap: + title: HTTP Location Zones + description: | + Status data for all HTTP + location zones. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPLocationZone' + example: + site1: + requests: 736395 + responses: + 1xx: 0 + 2xx: 727290 + 3xx: 4614 + 4xx: 934 + 5xx: 1535 + codes: + 200: 727290 + 301: 4614 + 404: 934 + 503: 1535 + total: 734373 + discarded: 2020 + received: 180157219 + sent: 20183175459 + site2: + requests: 185307 + responses: + 1xx: 0 + 2xx: 112674 + 3xx: 45383 + 4xx: 2504 + 5xx: 4419 + codes: + 200: 112674 + 301: 45383 + 404: 2504 + 503: 4419 + total: 164980 + discarded: 20326 + received: 51575327 + sent: 2983241510 + NginxHTTPLocationZone: + title: HTTP Location Zone + type: object + properties: + requests: + type: integer + description: The total number of client requests received from clients. + responses: + description: The total number of responses sent to clients, the + number of responses with status codes + “1xx”, “2xx”, “3xx”, + “4xx”, and “5xx”, and + the number of responses per each status code. + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. + readOnly: true + codes: + type: object + description: The number of responses per each status code. + readOnly: true + properties: + codeNumber: + type: integer + description: The number of responses with this particular status code. + readOnly: true + total: + type: integer + description: The total number of responses sent to clients. + readOnly: true + discarded: + type: integer + description: The total number of + requests completed without sending a response. 
+ received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + requests: 706690 + responses: + 1xx: 0 + 2xx: 699482 + 3xx: 4522 + 4xx: 907 + 5xx: 266 + codes: + 200: 112674 + 301: 4522 + 404: 2504 + 503: 266 + total: 705177 + discarded: 1513 + received: 172711587 + sent: 19415530115 + NginxHTTPCachesMap: + title: HTTP Caches + description: | + Status information of all HTTP caches configured by + proxy_cache_path + and other “*_cache_path” directives. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPCache' + example: + http-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + frontend-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + NginxHTTPCache: + title: HTTP Cache + type: object + properties: + size: + type: integer + description: The current size of the cache. + max_size: + type: integer + description: The limit on the maximum size of the cache + specified in the configuration. 
+ cold:
+ type: boolean
+ description: A boolean value indicating whether the “cache loader” process
+ is still loading data from disk into the cache.
+ hit:
+ type: object
+ properties:
+ responses:
+ type: integer
+ description: The total number of
+ valid
+ responses read from the cache.
+ bytes:
+ type: integer
+ description: The total number of bytes read from the cache.
+ stale:
+ type: object
+ properties:
+ responses:
+ type: integer
+ description: The total number of expired responses read from the cache (see
+ proxy_cache_use_stale
+ and other “*_cache_use_stale” directives).
+ bytes:
+ type: integer
+ description: The total number of bytes read from the cache.
+ updating:
+ type: object
+ properties:
+ responses:
+ type: integer
+ description: The total number of expired responses read from the cache
+ while responses were being updated (see
+ proxy_cache_use_stale
+ and other “*_cache_use_stale” directives).
+ bytes:
+ type: integer
+ description: The total number of bytes read from the cache.
+ revalidated:
+ type: object
+ properties:
+ responses:
+ type: integer
+ description: The total number of expired and revalidated responses
+ read from the cache (see
+ proxy_cache_revalidate
+ and other “*_cache_revalidate” directives).
+ bytes:
+ type: integer
+ description: The total number of bytes read from the cache.
+ miss:
+ type: object
+ properties:
+ responses:
+ type: integer
+ description: The total number of responses not found in the cache.
+ bytes:
+ type: integer
+ description: The total number of bytes read from the proxied server.
+ responses_written:
+ type: integer
+ description: The total number of responses written to the cache.
+ bytes_written:
+ type: integer
+ description: The total number of bytes written to the cache.
+ expired:
+ type: object
+ properties:
+ responses:
+ type: integer
+ description: The total number of expired responses not taken from the cache.
+ bytes: + type: integer + description: The total number of bytes read from the proxied server. + responses_written: + type: integer + description: The total number of responses written to the cache. + bytes_written: + type: integer + description: The total number of bytes written to the cache. + bypass: + type: object + properties: + responses: + type: integer + description: The total number of responses not looked up in the cache due to the + proxy_cache_bypass + and other “*_cache_bypass” directives. + bytes: + type: integer + description: The total number of bytes read from the proxied server. + responses_written: + type: integer + description: The total number of responses written to the cache. + bytes_written: + type: integer + description: The total number of bytes written to the cache. + example: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + NginxHTTPLimitConnZonesMap: + title: HTTP limit_conns + description: | + Status data for all HTTP + limit_conn zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPLimitConnZone' + NginxHTTPLimitConnZone: + title: HTTP Connections Limiting + type: object + properties: + passed: + type: integer + description: The total number of connections that were neither limited + nor accounted as limited. + rejected: + type: integer + description: The total number of connections that were rejected. + rejected_dry_run: + type: integer + description: The total number of connections accounted as rejected in the + dry run + mode. 
+ example: + passed: 15 + rejected: 0 + rejected_dry_run: 2 + NginxHTTPLimitReqZonesMap: + title: HTTP limit_reqs + description: | + Status data for all HTTP + limit_req zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPLimitReqZone' + NginxHTTPLimitReqZone: + title: HTTP Requests Rate Limiting + type: object + properties: + passed: + type: integer + description: The total number of requests that were neither limited + nor accounted as limited. + delayed: + type: integer + description: The total number of requests that were delayed. + rejected: + type: integer + description: The total number of requests that were rejected. + delayed_dry_run: + type: integer + description: The total number of requests accounted as delayed in the + dry run + mode. + rejected_dry_run: + type: integer + description: The total number of requests accounted as rejected in the + dry run + mode. + example: + passed: 15 + delayed: 4 + rejected: 0 + delayed_dry_run: 1 + rejected_dry_run: 2 + NginxHTTPUpstreamMap: + title: HTTP Upstreams + description: | + Status information of all HTTP + dynamically configurable + groups. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPUpstream' + example: + trac-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + ssl: + handshakes: 620311 + handshakes_failed: 3432 + session_reuses: 36442 + no_common_protocol: 4 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + expired_cert: 2 + revoked_cert: 1 + hostname_mismatch: 2 + other: 1 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + codes: + 200: 666310 + 404: 915 + 503: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + codes: {} + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + keepalive: 0 + zombies: 0 + zone: trac-backend + hg-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + ssl: + handshakes: 620311 + handshakes_failed: 3432 + session_reuses: 36442 + no_common_protocol: 4 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + expired_cert: 2 + revoked_cert: 1 + hostname_mismatch: 2 + other: 1 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + codes: + 200: 666310 + 404: 915 + 503: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 
26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + codes: {} + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + keepalive: 0 + zombies: 0 + zone: hg-backend + NginxHTTPUpstream: + title: HTTP Upstream + type: object + properties: + peers: + $ref: '#/definitions/NginxHTTPUpstreamPeerMap' + keepalive: + type: integer + description: The current number of idle + keepalive + connections. + zombies: + type: integer + description: The current number of servers removed + from the group but still processing active client requests. + zone: + type: string + description: The name of the shared memory + zone + that keeps the group’s configuration and run-time state. + queue: + type: object + description: > + For the requests + queue, + the following data are provided: + properties: + size: + type: integer + description: The current number of requests in the queue. + max_size: + type: integer + description: The maximum number of requests that can be in the queue + at the same time. + overflows: + type: integer + description: The total number of requests rejected due to the queue overflow. 
+ example: + upstream_backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + ssl: + handshakes: 620311 + handshakes_failed: 3432 + session_reuses: 36442 + no_common_protocol: 4 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + expired_cert: 2 + revoked_cert: 1 + hostname_mismatch: 2 + other: 1 + max_conns: 20 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + codes: + 200: 666310 + 404: 915 + 503: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 20 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + codes: {} + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + keepalive: 0 + zombies: 0 + zone: upstream_backend + NginxHTTPUpstreamPeerMap: + title: HTTP Upstream Servers + description: | + An array of HTTP + upstream servers. + type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamPeer' + NginxHTTPUpstreamPeer: + title: HTTP Upstream Server + type: object + properties: + id: + type: integer + description: The ID of the server. + readOnly: true + server: + type: string + description: An address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. + name: + type: string + description: The name of the server specified in the + server + directive. 
+ readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. + state: + type: string + enum: + - up + - draining + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “draining”, “down”, + “unavail”, “checking”, + and “unhealthy”. + active: + type: integer + description: The current number of active connections. + readOnly: true + ssl: + type: object + readOnly: true + properties: + handshakes: + type: integer + description: The total number of successful SSL handshakes. + readOnly: true + handshakes_failed: + type: integer + description: The total number of failed SSL handshakes. + readOnly: true + session_reuses: + type: integer + description: The total number of session reuses during SSL handshake. + readOnly: true + no_common_protocol: + type: integer + description: The number of SSL handshakes failed + because of no common protocol. + handshake_timeout: + type: integer + description: The number of SSL handshakes failed + because of a timeout. + peer_rejected_cert: + type: integer + description: The number of failed SSL handshakes + when nginx presented the certificate to the upstream server + but it was rejected with a corresponding alert message. + verify_failures: + type: object + description: SSL certificate verification errors + properties: + expired_cert: + type: integer + description: An expired or not yet valid certificate + was presented by an upstream server. + revoked_cert: + type: integer + description: A revoked certificate was presented by an upstream server. + hostname_mismatch: + type: integer + description: Server's certificate doesn't match the hostname. + other: + type: integer + description: Other SSL certificate verification errors. + max_conns: + type: integer + description: The + max_conns + limit for the server. 
+ requests: + type: integer + description: The total number of client requests forwarded to this server. + readOnly: true + responses: + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. + readOnly: true + codes: + type: object + description: The number of responses per each status code. + readOnly: true + properties: + codeNumber: + type: integer + description: The number of responses with this particular status code. + readOnly: true + total: + type: integer + description: The total number of responses obtained from this server. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. + readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client requests + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. + readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. + fails: + type: integer + description: The number of failed health checks. + unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). 
+ last_passed: + type: boolean + description: Boolean indicating if the last health check request was successful + and passed + tests. + downtime: + type: integer + readOnly: true + description: Total time the server was in the “unavail”, + “checking”, and “unhealthy” states. + downstart: + type: string + format: date-time + readOnly: true + description: The time when the server became + “unavail”, “checking”, + or “unhealthy”, + in the ISO 8601 format with millisecond resolution. + selected: + type: string + format: date-time + readOnly: true + description: The time when the server was last selected to process a request, + in the ISO 8601 format with millisecond resolution. + header_time: + type: integer + readOnly: true + description: The average time to get the + response header + from the server. + response_time: + type: integer + readOnly: true + description: The average time to get the + full response + from the server. + NginxHTTPUpstreamConfServerMap: + title: HTTP Upstream Servers + description: An array of HTTP upstream servers for dynamic configuration. + type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:8088 + weight: 1 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: false + down: false + - id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPUpstreamConfServer: + title: HTTP Upstream Server + description: | + Dynamically configurable parameters of an HTTP upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the HTTP upstream server. + The ID is assigned automatically and cannot be changed. + readOnly: true + server: + type: string + description: Same as the + address + parameter of the HTTP upstream server. + When adding a server, it is possible to specify it as a domain name. 
+ In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “http” block. + See also the + resolve + parameter of the HTTP upstream server. + service: + type: string + description: Same as the + service + parameter of the HTTP upstream server. + This parameter cannot be changed. + readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the HTTP upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the HTTP upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the HTTP upstream server. + fail_timeout: + type: string + description: Same as the + fail_timeout + parameter of the HTTP upstream server. + slow_start: + type: string + description: Same as the + slow_start + parameter of the HTTP upstream server. + route: + type: string + description: Same as the + route + parameter of the HTTP upstream server. + backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the HTTP upstream server. + drain: + type: boolean + description: Same as the + drain + parameter of the HTTP upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. 
+ readOnly: true + example: + id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPKeyvalZonesMap: + title: HTTP Keyval Shared Memory Zones + description: | + Contents of all HTTP keyval shared memory zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPKeyvalZone' + example: + keyval_zone: + key1: value1 + key2: value2 + key3: value3 + one: + arg1: value1 + arg2: value2 + arg3: value3 + NginxHTTPKeyvalZone: + title: HTTP Keyval Shared Memory Zone + description: | + Contents of an HTTP keyval shared memory zone + when using the GET method. + type: object + example: + key1: value1 + key2: value2 + key3: value3 + NginxHTTPKeyvalZonePostPatch: + title: HTTP Keyval Shared Memory Zone + description: | + Contents of an HTTP keyval shared memory zone + when using the POST or PATCH methods. + type: object + example: + key1: value1 + key2: value2 + key3: + value: value3 + expire: 30000 + NginxStreamServerZonesMap: + title: Stream Server Zones + description: | + Status information for all stream + status zones. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxStreamServerZone' + example: + mysql-frontend: + processing: 2 + connections: 270925 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 270925 + discarded: 0 + received: 28988975 + sent: 3879346317 + ssl: + handshakes: 76455 + handshakes_failed: 432 + session_reuses: 28770 + no_common_protocol: 4 + no_common_cipher: 2 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + no_cert: 0 + expired_cert: 2 + revoked_cert: 1 + other: 1 + dns: + processing: 1 + connections: 155569 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 155569 + discarded: 0 + received: 4200363 + sent: 20489184 + ssl: + handshakes: 2040 + handshakes_failed: 23 + session_reuses: 65 + no_common_protocol: 4 + no_common_cipher: 2 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + no_cert: 0 + expired_cert: 2 + revoked_cert: 1 + other: 1 + NginxStreamServerZone: + title: Stream Server Zone + type: object + properties: + processing: + type: integer + description: The number of client connections + that are currently being processed. + connections: + type: integer + description: The total number of connections accepted from clients. + sessions: + type: object + description: The total number of completed sessions, + and the number of sessions completed with status codes + “2xx”, “4xx”, or “5xx”. + properties: + 2xx: + type: integer + description: The total number of sessions completed with + status codes + “2xx”. + 4xx: + type: integer + description: The total number of sessions completed with + status codes + “4xx”. + 5xx: + type: integer + description: The total number of sessions completed with + status codes + “5xx”. + total: + type: integer + description: The total number of completed client sessions. + discarded: + type: integer + description: The total number of + connections completed without creating a session. 
+ received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + ssl: + type: object + readOnly: true + properties: + handshakes: + type: integer + description: The total number of successful SSL handshakes. + readOnly: true + handshakes_failed: + type: integer + description: The total number of failed SSL handshakes. + readOnly: true + session_reuses: + type: integer + description: The total number of session reuses during SSL handshake. + readOnly: true + no_common_protocol: + type: integer + description: The number of SSL handshakes failed + because of no common protocol. + no_common_cipher: + type: integer + description: The number of SSL handshakes failed + because of no shared cipher. + handshake_timeout: + type: integer + description: The number of SSL handshakes failed + because of a timeout. + peer_rejected_cert: + type: integer + description: The number of failed SSL handshakes + when nginx presented the certificate to the client + but it was rejected with a corresponding alert message. + verify_failures: + type: object + description: SSL certificate verification errors + properties: + no_cert: + type: integer + description: A client did not provide the required certificate. + expired_cert: + type: integer + description: An expired or not yet valid certificate + was presented by a client. + revoked_cert: + type: integer + description: A revoked certificate was presented by a client. + other: + type: integer + description: Other SSL certificate verification errors. 
+ example: + dns: + processing: 1 + connections: 155569 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 155569 + discarded: 0 + received: 4200363 + sent: 20489184 + ssl: + handshakes: 76455 + handshakes_failed: 432 + session_reuses: 28770 + no_common_protocol: 4 + no_common_cipher: 2 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + no_cert: 0 + expired_cert: 2 + revoked_cert: 1 + other: 1 + NginxStreamLimitConnZonesMap: + title: Stream limit_conns + description: | + Status data for all stream + limit_conn zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxStreamLimitConnZone' + NginxStreamLimitConnZone: + title: Stream Connections Limiting + type: object + properties: + passed: + type: integer + description: The total number of connections that were neither limited + nor accounted as limited. + rejected: + type: integer + description: The total number of connections that were rejected. + rejected_dry_run: + type: integer + description: The total number of connections accounted as rejected in the + dry run + mode. + example: + passed: 15 + rejected: 0 + rejected_dry_run: 2 + NginxStreamUpstreamMap: + title: Stream Upstreams + description: Status information of stream upstream server groups. 
+    type: object
+    additionalProperties:
+      $ref: '#/definitions/NginxStreamUpstream'
+    example:
+      mysql_backends:
+        peers:
+          - id: 0
+            server: 10.0.0.1:12345
+            name: 10.0.0.1:12345
+            backup: false
+            weight: 5
+            state: up
+            active: 0
+            ssl:
+              handshakes: 1045
+              handshakes_failed: 89
+              session_reuses: 321
+              no_common_protocol: 4
+              handshake_timeout: 0
+              peer_rejected_cert: 0
+              verify_failures:
+                expired_cert: 2
+                revoked_cert: 1
+                hostname_mismatch: 2
+                other: 1
+            max_conns: 30
+            connections: 1231
+            sent: 251946292
+            received: 19222475454
+            fails: 0
+            unavail: 0
+            health_checks:
+              checks: 26214
+              fails: 0
+              unhealthy: 0
+              last_passed: true
+            downtime: 0
+            downstart: 2022-06-28T11:09:21.602Z
+            selected: 2022-06-28T15:01:25Z
+          - id: 1
+            server: 10.0.0.1:12346
+            name: 10.0.0.1:12346
+            backup: true
+            weight: 1
+            state: unhealthy
+            active: 0
+            max_conns: 30
+            connections: 0
+            sent: 0
+            received: 0
+            fails: 0
+            unavail: 0
+            health_checks:
+              checks: 26284
+              fails: 26284
+              unhealthy: 1
+              last_passed: false
+            downtime: 262925617
+            downstart: 2022-06-28T11:09:21.602Z
+            selected: 2022-06-28T15:01:25Z
+        zombies: 0
+        zone: mysql_backends
+      dns:
+        peers:
+          - id: 0
+            server: 10.0.0.1:12347
+            name: 10.0.0.1:12347
+            backup: false
+            weight: 5
+            state: up
+            active: 0
+            ssl:
+              handshakes: 5268
+              handshakes_failed: 121
+              session_reuses: 854
+              no_common_protocol: 4
+              handshake_timeout: 0
+              peer_rejected_cert: 0
+              verify_failures:
+                expired_cert: 2
+                revoked_cert: 1
+                hostname_mismatch: 2
+                other: 1
+            max_conns: 30
+            connections: 667231
+            sent: 251946292
+            received: 19222475454
+            fails: 0
+            unavail: 0
+            health_checks:
+              checks: 26214
+              fails: 0
+              unhealthy: 0
+              last_passed: true
+            downtime: 0
+            downstart: 2022-06-28T11:09:21.602Z
+            selected: 2022-06-28T15:01:25Z
+          - id: 1
+            server: 10.0.0.1:12348
+            name: 10.0.0.1:12348
+            backup: true
+            weight: 1
+            state: unhealthy
+            active: 0
+            connections: 0
+            max_conns: 30
+            sent: 0
+            received: 0
+            fails: 0
+            unavail: 0
+            health_checks:
+              checks: 26284
+              fails: 26284
+              unhealthy: 1
+              last_passed: false
+            downtime: 262925617
+            downstart: 2022-06-28T11:09:21.602Z
+            selected: 2022-06-28T15:01:25Z
+        zombies: 0
+        zone: dns
+  NginxStreamUpstream:
+    title: Stream Upstream
+    type: object
+    properties:
+      peers:
+        $ref: '#/definitions/NginxStreamUpstreamPeerMap'
+      zombies:
+        type: integer
+        description: The current number of servers removed from the group
+          but still processing active client connections.
+      zone:
+        type: string
+        description: The name of the shared memory
+          zone
+          that keeps the group’s configuration and run-time state.
+    example:
+      dns:
+        peers:
+          - id: 0
+            server: 10.0.0.1:12347
+            name: 10.0.0.1:12347
+            backup: false
+            weight: 5
+            state: up
+            active: 0
+            ssl:
+              handshakes: 200
+              handshakes_failed: 4
+              session_reuses: 189
+              no_common_protocol: 4
+              handshake_timeout: 0
+              peer_rejected_cert: 0
+              verify_failures:
+                expired_cert: 2
+                revoked_cert: 1
+                hostname_mismatch: 2
+                other: 1
+            max_conns: 50
+            connections: 667231
+            sent: 251946292
+            received: 19222475454
+            fails: 0
+            unavail: 0
+            health_checks:
+              checks: 26214
+              fails: 0
+              unhealthy: 0
+              last_passed: true
+            downtime: 0
+            downstart: 2022-06-28T11:09:21.602Z
+            selected: 2022-06-28T15:01:25Z
+          - id: 1
+            server: 10.0.0.1:12348
+            name: 10.0.0.1:12348
+            backup: true
+            weight: 1
+            state: unhealthy
+            active: 0
+            max_conns: 50
+            connections: 0
+            sent: 0
+            received: 0
+            fails: 0
+            unavail: 0
+            health_checks:
+              checks: 26284
+              fails: 26284
+              unhealthy: 1
+              last_passed: false
+            downtime: 262925617
+            downstart: 2022-06-28T11:09:21.602Z
+            selected: 2022-06-28T15:01:25Z
+        zombies: 0
+        zone: dns
+  NginxStreamUpstreamPeerMap:
+    title: Stream Upstream Servers
+    description: Array of stream upstream servers.
+    type: array
+    items:
+      $ref: '#/definitions/NginxStreamUpstreamPeer'
+  NginxStreamUpstreamPeer:
+    title: Stream Upstream Server
+    type: object
+    properties:
+      id:
+        type: integer
+        description: The ID of the server.
+ readOnly: true + server: + type: string + description: An + address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. + name: + type: string + format: hostname + description: The name of the server specified in the + server + directive. + readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. + state: + type: string + readOnly: true + enum: + - up + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “down”, “unavail”, + “checking”, or “unhealthy”. + active: + type: integer + description: The current number of connections. + readOnly: true + ssl: + type: object + readOnly: true + properties: + handshakes: + type: integer + description: The total number of successful SSL handshakes. + readOnly: true + handshakes_failed: + type: integer + description: The total number of failed SSL handshakes. + readOnly: true + session_reuses: + type: integer + description: The total number of session reuses during SSL handshake. + readOnly: true + no_common_protocol: + type: integer + description: The number of SSL handshakes failed + because of no common protocol. + handshake_timeout: + type: integer + description: The number of SSL handshakes failed + because of a timeout. + peer_rejected_cert: + type: integer + description: The number of failed SSL handshakes + when nginx presented the certificate to the upstream server + but it was rejected with a corresponding alert message. + verify_failures: + type: object + description: SSL certificate verification errors + properties: + expired_cert: + type: integer + description: An expired or not yet valid certificate + was presented by an upstream server. + revoked_cert: + type: integer + description: A revoked certificate was presented by an upstream server. 
+ hostname_mismatch: + type: integer + description: Server's certificate doesn't match the hostname. + other: + type: integer + description: Other SSL certificate verification errors. + max_conns: + type: integer + description: The + max_conns + limit for the server. + connections: + type: integer + description: The total number of client connections forwarded to this server. + readOnly: true + connect_time: + type: integer + description: The average time to connect to the upstream server. + readOnly: true + first_byte_time: + type: integer + description: The average time to receive the first byte of data. + readOnly: true + response_time: + type: integer + description: The average time to receive the last byte of data. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. + readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client connections + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. + readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. + readOnly: true + fails: + type: integer + description: The number of failed health checks. + readOnly: true + unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). + readOnly: true + last_passed: + type: boolean + description: Boolean indicating whether the last health check request + was successful and passed + tests. 
+ readOnly: true + downtime: + type: integer + description: Total time the server was in the + “unavail”, “checking”, + and “unhealthy” states. + readOnly: true + downstart: + type: string + format: date-time + description: The time when the server became + “unavail”, “checking”, + or “unhealthy”, + in the ISO 8601 format with millisecond resolution. + readOnly: true + selected: + type: string + format: date-time + description: The time when the server was last selected + to process a connection, + in the ISO 8601 format with millisecond resolution. + readOnly: true + NginxStreamUpstreamConfServerMap: + title: Stream Upstream Servers + description: | + An array of stream upstream servers for dynamic configuration. + type: array + items: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + - id: 1 + server: 10.0.0.1:12349 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxStreamUpstreamConfServer: + title: Stream Upstream Server + description: | + Dynamically configurable parameters of a stream upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the stream upstream server. + The ID is assigned automatically and cannot be changed. + readOnly: true + server: + type: string + description: Same as the + address + parameter of the stream upstream server. + When adding a server, it is possible to specify it as a domain name. + In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “stream” block. + See also the + resolve + parameter of the stream upstream server. 
+ service: + type: string + description: Same as the + service + parameter of the stream upstream server. + This parameter cannot be changed. + readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the stream upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the stream upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the stream upstream server. + fail_timeout: + type: string + description: Same as the + fail_timeout + parameter of the stream upstream server. + slow_start: + type: string + description: Same as the + slow_start + parameter of the stream upstream server. + backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the stream upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. + readOnly: true + example: + id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxStreamKeyvalZonesMap: + title: Stream Keyval Shared Memory Zones + description: | + Contents of all stream keyval shared memory zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxStreamKeyvalZone' + example: + keyval_zone: + key1: value1 + key2: value2 + key3: value3 + one: + arg1: value1 + arg2: value2 + arg3: value3 + NginxStreamKeyvalZone: + title: Stream Keyval Shared Memory Zone + description: | + Contents of a stream keyval shared memory zone when using the GET method. 
+ type: object + example: + key1: value1 + key2: value2 + key3: value3 + NginxStreamKeyvalZonePostPatch: + title: Stream Keyval Shared Memory Zone + description: | + Contents of a stream keyval shared memory zone + when using the POST or PATCH methods. + type: object + example: + key1: value1 + key2: value2 + key3: + value: value3 + expire: 30000 + NginxStreamZoneSync: + title: Stream Zone Sync Node + type: object + properties: + zones: + type: object + title: Zone Sync Zones + description: Synchronization information per each shared memory zone. + additionalProperties: + $ref: '#/definitions/NginxStreamZoneSyncZone' + status: + type: object + description: Synchronization information per node in a cluster. + properties: + bytes_in: + type: integer + description: The number of bytes received by this node. + msgs_in: + type: integer + description: The number of messages received by this node. + msgs_out: + type: integer + description: The number of messages sent by this node. + bytes_out: + type: integer + description: The number of bytes sent by this node. + nodes_online: + type: integer + description: The number of peers this node is connected to. + example: + zones: + zone1: + records_pending: 2061 + records_total: 260575 + zone2: + records_pending: 0 + records_total: 14749 + status: + bytes_in: 1364923761 + msgs_in: 337236 + msgs_out: 346717 + bytes_out: 1402765472 + nodes_online: 15 + NginxStreamZoneSyncZone: + title: Sync Zone + description: Synchronization status of a shared memory zone. + type: object + properties: + records_pending: + type: integer + description: The number of records that need to be sent to the cluster. + records_total: + type: integer + description: The total number of records stored in the shared memory zone. + NginxResolverZonesMap: + title: Resolver Zones + description: | + Status data for all + resolver zones. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxResolverZone' + example: + resolver_zone1: + requests: + name: 25460 + srv: 130 + addr: 2580 + responses: + noerror: 26499 + formerr: 0 + servfail: 3 + nxdomain: 0 + notimp: 0 + refused: 0 + timedout: 243 + unknown: 478 + resolver_zone2: + requests: + name: 325460 + srv: 1130 + addr: 12580 + responses: + noerror: 226499 + formerr: 0 + servfail: 283 + nxdomain: 0 + notimp: 0 + refused: 0 + timedout: 743 + unknown: 1478 + NginxResolverZone: + title: Resolver Zone + description: | + Statistics of DNS requests and responses per particular + resolver zone. + type: object + properties: + requests: + type: object + readOnly: true + properties: + name: + type: integer + description: The total number of requests + to resolve names to addresses. + readOnly: true + srv: + type: integer + description: The total number of requests + to resolve SRV records. + readOnly: true + addr: + type: integer + description: The total number of requests + to resolve addresses to names. + readOnly: true + responses: + type: object + readOnly: true + properties: + noerror: + type: integer + description: The total number of successful responses. + readOnly: true + formerr: + type: integer + description: The total number of + FORMERR (Format error) responses. + readOnly: true + servfail: + type: integer + description: The total number of + SERVFAIL (Server failure) responses. + readOnly: true + nxdomain: + type: integer + description: The total number of + NXDOMAIN (Host not found) responses. + readOnly: true + notimp: + type: integer + description: The total number of + NOTIMP (Unimplemented) responses. + readOnly: true + refused: + type: integer + description: The total number of + REFUSED (Operation refused) responses. + readOnly: true + timedout: + type: integer + description: The total number of timed out requests. 
+ readOnly: true + unknown: + type: integer + description: The total number of requests + completed with an unknown error. + readOnly: true + example: + resolver_zone1: + requests: + name: 25460 + srv: 130 + addr: 2580 + responses: + noerror: 26499 + formerr: 0 + servfail: 3 + nxdomain: 0 + notimp: 0 + refused: 0 + timedout: 243 + unknown: 478 + NginxError: + title: Error + description: | + nginx error object. + type: object + properties: + error: + type: object + properties: + status: + type: integer + description: HTTP error code. + text: + type: string + description: Error description. + code: + type: string + description: Internal nginx error code. + request_id: + type: string + description: The ID of the request, equals the value of the + $request_id + variable. + href: + type: string + description: Link to reference documentation. diff --git a/content/nginx/admin-guide/yaml/v9/nginx_api.yaml b/content/nginx/admin-guide/yaml/v9/nginx_api.yaml new file mode 100644 index 000000000..94bb4991a --- /dev/null +++ b/content/nginx/admin-guide/yaml/v9/nginx_api.yaml @@ -0,0 +1,4850 @@ +swagger: '2.0' +info: + version: '9.0' + title: NGINX Plus REST API + description: NGINX Plus REST + [API](https://nginx.org/en/docs/http/ngx_http_api_module.html) + provides access to NGINX Plus status information, + on-the-fly configuration of upstream servers and + key-value pairs management for + [http](https://nginx.org/en/docs/http/ngx_http_keyval_module.html) and + [stream](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html). 
+basePath: /api/9 +tags: + - name: General Info + - name: Processes + - name: Connections + - name: Workers + - name: Slabs + - name: Resolvers + - name: SSL + - name: HTTP + - name: HTTP Requests + - name: HTTP Server Zones + - name: HTTP Location Zones + - name: HTTP Caches + - name: HTTP Limit Conns + - name: HTTP Limit Reqs + - name: HTTP Keyvals + - name: HTTP Upstreams + - name: Stream + - name: Stream Server Zones + - name: Stream Limit Conns + - name: Stream Keyvals + - name: Stream Upstreams + - name: Stream Zone Sync + - name: Method GET + - name: Method POST + - name: Method PATCH + - name: Method DELETE +schemes: + - http + - https +paths: + /: + get: + tags: + - General Info + - Method GET + summary: Return list of root endpoints + description: Returns a list of root endpoints. + operationId: getAPIEndpoints + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /nginx: + get: + tags: + - General Info + - Method GET + summary: Return status of nginx running instance + description: Returns nginx version, build name, address, + number of configuration reloads, IDs of master and worker processes. + operationId: getNginx + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of nginx running instance will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxObject' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /processes: + get: + tags: + - Processes + - Method GET + summary: Return nginx processes status + description: Returns the number of abnormally terminated + and respawned child processes. 
+ operationId: getProcesses + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxProcesses' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Processes + - Method DELETE + summary: Reset nginx processes statistics + description: Resets counters of abnormally terminated and respawned + child processes. + operationId: deleteProcesses + responses: + '204': + description: Success + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /connections: + get: + tags: + - Connections + - Method GET + summary: Return client connections statistics + description: Returns statistics of client connections. + operationId: getConnections + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxConnections' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the connections statistics will be output. + delete: + tags: + - Connections + - Method DELETE + summary: Reset client connections statistics + description: Resets statistics of accepted and dropped + client connections. + operationId: deleteConnections + responses: + '204': + description: Success + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /slabs/: + get: + tags: + - Slabs + - Method GET + summary: Return status of all slabs + description: Returns status of slabs + for each shared memory zone with slab allocator. 
+ operationId: getSlabs + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of slab zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZoneMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/slabs/{slabZoneName}': + parameters: + - name: slabZoneName + in: path + description: The name of the shared memory zone with slab allocator. + required: true + type: string + get: + tags: + - Slabs + - Method GET + summary: Return status of a slab + description: Returns status of slabs for a particular shared memory zone + with slab allocator. + operationId: getSlabZone + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of the slab zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSlabZone' + '404': + description: | + Slab not found (*SlabNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Slabs + - Method DELETE + summary: Reset slab statistics + description: Resets the “reqs” and “fails” + metrics for each memory slot. + operationId: deleteSlabZoneStats + responses: + '204': + description: Success + '404': + description: | + Slab not found (*SlabNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/: + get: + tags: + - HTTP + - Method GET + summary: Return list of HTTP-related endpoints + description: Returns a list of first level HTTP endpoints. 
+ operationId: getHttp + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /http/requests: + get: + tags: + - HTTP Requests + - Method GET + summary: Return HTTP requests statistics + description: Returns status of client HTTP requests. + operationId: getHttpRequests + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of client HTTP requests statistics + will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPRequests' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Requests + - Method DELETE + summary: Reset HTTP requests statistics + description: Resets the number of total client HTTP requests. + operationId: deleteHttpRequests + responses: + '204': + description: Success + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/server_zones/: + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of all HTTP server zones + description: Returns status information for each HTTP + [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone). + operationId: getHttpServerZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/server_zones/{httpServerZoneName}': + parameters: + - name: httpServerZoneName + in: path + description: The name of an HTTP server zone. + type: string + required: true + get: + tags: + - HTTP Server Zones + - Method GET + summary: Return status of an HTTP server zone + description: Returns status of a particular HTTP server zone. + operationId: getHttpServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPServerZone' + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Server Zones + - Method DELETE + summary: Reset statistics for an HTTP server zone + description: Resets statistics of accepted and discarded requests, responses, + received and sent bytes, counters of SSL handshakes and session reuses + in a particular HTTP server zone. + operationId: deleteHttpServerZoneStat + responses: + '204': + description: Success + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/location_zones/: + get: + tags: + - HTTP Location Zones + - Method GET + summary: Return status of all HTTP location zones + description: Returns status information for each HTTP + [location zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone_location). 
+ operationId: getHttpLocationZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of location zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLocationZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/location_zones/{httpLocationZoneName}': + parameters: + - name: httpLocationZoneName + in: path + description: The name of an HTTP [location zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone_location). + type: string + required: true + get: + tags: + - HTTP Location Zones + - Method GET + summary: Return status of an HTTP location zone + description: Returns status of a particular + HTTP [location zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone_location). + operationId: getHttpLocationZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the location zone will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLocationZone' + '404': + description: | + Location zone not found (*LocationZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Location Zones + - Method DELETE + summary: Reset statistics for a location zone. + description: Resets statistics of accepted and discarded requests, responses, + received and sent bytes in a particular location zone. 
+ operationId: deleteHttpLocationZoneStat + responses: + '204': + description: Success + '404': + description: | + Location zone not found (*LocationZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/caches/: + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of all caches + description: Returns status of each cache configured by + [proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) + and other “*_cache_path” directives. + operationId: getHttpCaches + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of cache zones will be output. + If the “fields” value is empty, + then only names of cache zones will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCachesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/caches/{httpCacheZoneName}': + parameters: + - name: httpCacheZoneName + in: path + description: The name of the cache zone. + type: string + required: true + get: + tags: + - HTTP Caches + - Method GET + summary: Return status of a cache + description: Returns status of a particular cache. + operationId: getHttpCacheZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the cache zone will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPCache' + '404': + description: | + Cache not found (*CacheNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Caches + - Method DELETE + summary: Reset cache statistics + description: Resets statistics of cache hits/misses in a particular cache zone. + operationId: deleteHttpCacheZoneStat + responses: + '204': + description: Success + '404': + description: | + Cache not found (*CacheNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/limit_conns/: + get: + tags: + - HTTP Limit Conns + - Method GET + summary: Return status of all HTTP limit_conn zones + description: Returns status information for each HTTP + [limit_conn zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone). + operationId: getHttpLimitConnZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of limit_conn zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLimitConnZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/limit_conns/{httpLimitConnZoneName}': + parameters: + - name: httpLimitConnZoneName + in: path + description: The name of a + [limit_conn zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone). 
+ type: string + required: true + get: + tags: + - HTTP Limit Conns + - Method GET + summary: Return status of an HTTP limit_conn zone + description: Returns status of a particular HTTP + [limit_conn zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone). + operationId: getHttpLimitConnZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the + [limit_conn zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone) + will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLimitConnZone' + '404': + description: | + limit_conn not found (*LimitConnNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Limit Conns + - Method DELETE + summary: Reset statistics for an HTTP limit_conn zone + description: Resets the connection limiting statistics. + operationId: deleteHttpLimitConnZoneStat + responses: + '204': + description: Success + '404': + description: | + limit_conn not found (*LimitConnNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/limit_reqs/: + get: + tags: + - HTTP Limit Reqs + - Method GET + summary: Return status of all HTTP limit_req zones + description: Returns status information for each HTTP + [limit_req zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone). + operationId: getHttpLimitReqZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of limit_req zones will be output. + If the “fields” value is empty, + then only zone names will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLimitReqZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/limit_reqs/{httpLimitReqZoneName}': + parameters: + - name: httpLimitReqZoneName + in: path + description: The name of a + [limit_req zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone). + type: string + required: true + get: + tags: + - HTTP Limit Reqs + - Method GET + summary: Return status of an HTTP limit_req zone + description: Returns status of a particular HTTP + [limit_req zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone). + operationId: getHttpLimitReqZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the + [limit_req zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone) + will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPLimitReqZone' + '404': + description: | + limit_req not found (*LimitReqNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Limit Reqs + - Method DELETE + summary: Reset statistics for an HTTP limit_req zone + description: Resets the requests limiting statistics. + operationId: deleteHttpLimitReqZoneStat + responses: + '204': + description: Success + '404': + description: | + limit_req not found (*LimitReqNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/upstreams/: + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of all HTTP upstream server groups + description: Returns status of each HTTP upstream server group + and its servers. 
+ operationId: getHttpUpstreams + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of upstream server groups will be output. + If the “fields” value is empty, + only names of upstreams will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an HTTP upstream server group. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return status of an HTTP upstream server group + description: Returns status of a particular HTTP upstream server group + and its servers. + operationId: getHttpUpstreamName + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Upstreams + - Method DELETE + summary: Reset statistics of an HTTP upstream server group + description: Resets the statistics for each upstream server + in an upstream server group and queue statistics. 
+ operationId: deleteHttpUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/servers/': + parameters: + - name: httpUpstreamName + in: path + description: The name of an upstream server group. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of all servers in an HTTP upstream server group + description: Returns configuration of each server + in a particular HTTP upstream server group. + operationId: getHttpUpstreamServers + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServerMap' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - HTTP Upstreams + - Method POST + summary: Add a server to an HTTP upstream server group + description: Adds a new server to an HTTP upstream server group. + Server parameters are specified in the JSON format. + operationId: postHttpUpstreamServer + produces: + - application/json + parameters: + - in: body + name: postHttpUpstreamServer + description: Address of a new server and other optional parameters + in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + responses: + '201': + description: Created + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + missing “*server*” argument (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + service upstream “*host*” may not have port (*UpstreamBadAddress*), + service upstream “*host*” requires domain name (*UpstreamBadAddress*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + reading request body failed *BodyReadError*), + route is too long (*UpstreamBadRoute*), + “*service*” is empty (*UpstreamBadService*), + no resolver defined to resolve (*UpstreamConfNoResolver*), + upstream “**name**” has no backup (*UpstreamNoBackup*), + upstream “**name**” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '409': + description: Entry exists (*EntryExists*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + '/http/upstreams/{httpUpstreamName}/servers/{httpUpstreamServerId}': + parameters: + - name: httpUpstreamName + in: path + description: The name of the upstream server group. 
+ required: true + type: string + - name: httpUpstreamServerId + in: path + description: The ID of the server. + required: true + type: string + get: + tags: + - HTTP Upstreams + - Method GET + summary: Return configuration of a server in an HTTP upstream server group + description: Returns configuration of a particular server + in the HTTP upstream server group. + operationId: getHttpUpstreamPeer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Server with ID “**id**” does not exist (*UpstreamServerNotFound*), + unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - HTTP Upstreams + - Method PATCH + summary: Modify a server in an HTTP upstream server group + description: Modifies settings of a particular server + in an HTTP upstream server group. + Server parameters are specified in the JSON format. + operationId: patchHttpUpstreamPeer + produces: + - application/json + parameters: + - in: body + name: patchHttpUpstreamServer + description: Server parameters, specified in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid “**parameter**” value (*UpstreamConfFormatError*), + unknown parameter “**name**” (*UpstreamConfFormatError*), + nested object or list (*UpstreamConfFormatError*), + “*error*” while parsing (*UpstreamBadAddress*), + invalid “*server*” argument (*UpstreamBadAddress*), + invalid server ID (*UpstreamBadServerId*), + invalid “*weight*” (*UpstreamBadWeight*), + invalid “*max_conns*” (*UpstreamBadMaxConns*), + invalid “*max_fails*” (*UpstreamBadMaxFails*), + invalid “*fail_timeout*” (*UpstreamBadFailTimeout*), + invalid “*slow_start*” (*UpstreamBadSlowStart*), + reading request body failed *BodyReadError*), + route is too long (*UpstreamBadRoute*), + “*service*” is empty (*UpstreamBadService*), + server “**ID**” address is immutable (*UpstreamServerImmutable*), + server “*ID*” weight is immutable (*UpstreamServerWeightImmutable*), + upstream “*name*” memory exhausted (*UpstreamOutOfMemory*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Server with ID “**id**” does not exist (*UpstreamServerNotFound*), + unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '415': + description: JSON error (*JsonError*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - HTTP Upstreams + - Method DELETE + summary: Remove a server from an HTTP upstream server group + description: Removes a server from an HTTP upstream server group. 
+ operationId: deleteHttpUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Server with ID “**id**” does not exist (*UpstreamServerNotFound*), + unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /http/keyvals/: + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from all HTTP keyval zones + description: Returns key-value pairs for each HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + operationId: getHttpKeyvalZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: If the “fields” value is empty, + then only HTTP keyval zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/http/keyvals/{httpKeyvalZoneName}': + parameters: + - name: httpKeyvalZoneName + in: path + description: The name of an HTTP keyval shared memory zone. + required: true + type: string + get: + tags: + - HTTP Keyvals + - Method GET + summary: Return key-value pairs from an HTTP keyval zone + description: Returns key-value pairs stored in a particular HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). 
+ operationId: getHttpKeyvalZoneKeysValues + produces: + - application/json + parameters: + - name: key + in: query + type: string + description: Get a particular key-value pair from the HTTP keyval zone. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxHTTPKeyvalZone' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - HTTP Keyvals + - Method POST + summary: Add a key-value pair to the HTTP keyval zone + description: Adds a new key-value pair to the HTTP keyval shared memory + [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone). + Several key-value pairs can be entered + if the HTTP keyval shared memory zone is empty. + operationId: postHttpKeyvalZoneData + produces: + - application/json + parameters: + - in: body + name: Key-value + description: A key-value pair is specified in the JSON format. + Several key-value pairs can be entered + if the HTTP keyval shared memory zone is empty. + Expiration time in milliseconds can be specified for a key-value pair + with the *expire* parameter + which overrides the [*timeout*](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_timeout) + parameter of the + keyval_zone + directive. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxHTTPKeyvalZonePostPatch'
+ responses:
+ '201':
+ description: Created
+ '400':
+ description: |
+ Invalid JSON (*KeyvalFormatError*),
+ invalid key format (*KeyvalFormatError*),
+ key required (*KeyvalFormatError*),
+ keyval timeout is not enabled (*KeyvalFormatError*),
+ only one key can be added (*KeyvalFormatError*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '409':
+ description: |
+ Entry exists (*EntryExists*),
+ key already exists (*KeyvalKeyExists*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '413':
+ description: Request Entity Too Large
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ patch:
+ tags:
+ - HTTP Keyvals
+ - Method PATCH
+ summary: Modify a key-value or delete a key
+ description: Changes the value of the selected key in the key-value pair,
+ deletes a key by setting the key value to null,
+ changes expiration time of a key-value pair.
+ If
+ synchronization
+ of keyval zones in a cluster is enabled,
+ deletes a key only on a target cluster node.
+ Expiration time in milliseconds can be specified for a key-value pair
+ with the *expire* parameter
+ which overrides the [*timeout*](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_timeout)
+ parameter of the
+ keyval_zone
+ directive.
+ operationId: patchHttpKeyvalZoneKeyValue
+ produces:
+ - application/json
+ parameters:
+ - in: body
+ name: httpKeyvalZoneKeyValue
+ description: A new value for the key is specified in the JSON format. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxHTTPKeyvalZonePostPatch'
+ responses:
+ '204':
+ description: Success
+ '400':
+ description: |
+ Invalid JSON (*KeyvalFormatError*),
+ key required (*KeyvalFormatError*),
+ keyval timeout is not enabled (*KeyvalFormatError*),
+ only one key can be updated (*KeyvalFormatError*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ keyval key not found (*KeyvalKeyNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '413':
+ description: Request Entity Too Large
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - HTTP Keyvals
+ - Method DELETE
+ summary: Empty the HTTP keyval zone
+ description: Deletes all key-value pairs from the HTTP keyval shared memory
+ [zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone).
+ If
+ synchronization
+ of keyval zones in a cluster is enabled,
+ empties the keyval zone only on a target cluster node.
+ operationId: deleteHttpKeyvalZoneData
+ responses:
+ '204':
+ description: Success
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ /stream/:
+ get:
+ tags:
+ - Stream
+ summary: Return list of stream-related endpoints
+ description: Returns a list of first level stream endpoints. 
+ operationId: getStream + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/ArrayOfStrings' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /stream/server_zones/: + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of all stream server zones + description: Returns status information for each stream + [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone). + operationId: getStreamServerZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of server zones will be output. + If the “fields” value is empty, + then only server zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/stream/server_zones/{streamServerZoneName}': + parameters: + - name: streamServerZoneName + in: path + description: The name of a stream server zone. + type: string + required: true + get: + tags: + - Stream Server Zones + - Method GET + summary: Return status of a stream server zone + description: Returns status of a particular stream server zone. + operationId: getStreamServerZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the server zone will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamServerZone' + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Server Zones + - Method DELETE + summary: Reset statistics for a stream server zone + description: Resets statistics of accepted and discarded connections, sessions, + received and sent bytes, counters of SSL handshakes and session reuses + in a particular stream server zone. + operationId: deleteStreamServerZoneStat + responses: + '204': + description: Success + '404': + description: | + Server zone not found (*ServerZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/limit_conns/: + get: + tags: + - Stream Limit Conns + - Method GET + summary: Return status of all stream limit_conn zones + description: Returns status information for each stream + [limit_conn zone](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone). + operationId: getStreamLimitConnZones + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of limit_conn zones will be output. + If the “fields” value is empty, + then only zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamLimitConnZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/stream/limit_conns/{streamLimitConnZoneName}': + parameters: + - name: streamLimitConnZoneName + in: path + description: The name of a + [limit_conn zone](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone). 
+ type: string
+ required: true
+ get:
+ tags:
+ - Stream Limit Conns
+ - Method GET
+ summary: Return status of a stream limit_conn zone
+ description: Returns status of a particular stream
+ [limit_conn zone](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone).
+ operationId: getStreamLimitConnZone
+ produces:
+ - application/json
+ parameters:
+ - name: fields
+ in: query
+ type: string
+ description: Limits which fields of the
+ [limit_conn zone](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone)
+ will be output.
+ responses:
+ '200':
+ description: Success
+ schema:
+ $ref: '#/definitions/NginxStreamLimitConnZone'
+ '404':
+ description: |
+ limit_conn not found (*LimitConnNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - Stream Limit Conns
+ - Method DELETE
+ summary: Reset statistics for a stream limit_conn zone
+ description: Resets the connection limiting statistics.
+ operationId: deleteStreamLimitConnZoneStat
+ responses:
+ '204':
+ description: Success
+ '404':
+ description: |
+ limit_conn not found (*LimitConnNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ /stream/upstreams/:
+ get:
+ tags:
+ - Stream Upstreams
+ - Method GET
+ summary: Return status of all stream upstream server groups
+ description: Returns status of each stream upstream server group
+ and its servers.
+ operationId: getStreamUpstreams
+ produces:
+ - application/json
+ parameters:
+ - name: fields
+ in: query
+ type: string
+ description: Limits which fields of upstream server groups will be output.
+ If the “fields” value is empty,
+ only names of upstreams will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/': + parameters: + - name: streamUpstreamName + in: path + description: The name of a stream upstream server group. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return status of a stream upstream server group + description: Returns status of a particular stream upstream server group + and its servers. + operationId: getStreamUpstream + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the upstream server group will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstream' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Stream Upstreams + - Method DELETE + summary: Reset statistics of a stream upstream server group + description: Resets the statistics for each upstream server + in an upstream server group. 
+ operationId: deleteStreamUpstreamStat + produces: + - application/json + responses: + '204': + description: Success + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/stream/upstreams/{streamUpstreamName}/servers/': + parameters: + - name: streamUpstreamName + in: path + description: The name of an upstream server group. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of all servers in a stream upstream server group + description: Returns configuration of each server + in a particular stream upstream server group. + operationId: getStreamUpstreamServers + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: Upstream is static (*UpstreamStatic*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - Stream Upstreams + - Method POST + summary: Add a server to a stream upstream server group + description: Adds a new server to a stream upstream server group. + Server parameters are specified in the JSON format. + operationId: postStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: postStreamUpstreamServer + description: Address of a new server and other optional parameters + in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxStreamUpstreamConfServer'
+ responses:
+ '201':
+ description: Created
+ schema:
+ $ref: '#/definitions/NginxStreamUpstreamConfServer'
+ '400':
+ description: |
+ Upstream is static (*UpstreamStatic*),
+ invalid “**parameter**” value (*UpstreamConfFormatError*),
+ missing “*server*” argument (*UpstreamConfFormatError*),
+ unknown parameter “**name**” (*UpstreamConfFormatError*),
+ nested object or list (*UpstreamConfFormatError*),
+ “*error*” while parsing (*UpstreamBadAddress*),
+ no port in server “*host*” (*UpstreamBadAddress*),
+ service upstream “*host*” may not have port (*UpstreamBadAddress*),
+ service upstream “*host*” requires domain name (*UpstreamBadAddress*),
+ invalid “*weight*” (*UpstreamBadWeight*),
+ invalid “*max_conns*” (*UpstreamBadMaxConns*),
+ invalid “*max_fails*” (*UpstreamBadMaxFails*),
+ invalid “*fail_timeout*” (*UpstreamBadFailTimeout*),
+ invalid “*slow_start*” (*UpstreamBadSlowStart*),
+ “*service*” is empty (*UpstreamBadService*),
+ no resolver defined to resolve (*UpstreamConfNoResolver*),
+ upstream “**name**” has no backup (*UpstreamNoBackup*),
+ upstream “**name**” memory exhausted (*UpstreamOutOfMemory*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Unknown version (*UnknownVersion*),
+ upstream not found (*UpstreamNotFound*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '409':
+ description: Entry exists (*EntryExists*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '/stream/upstreams/{streamUpstreamName}/servers/{streamUpstreamServerId}':
+ parameters:
+ - name: streamUpstreamName
+ in: path
+ description: The name of the upstream server group. 
+ required: true + type: string + - name: streamUpstreamServerId + in: path + description: The ID of the server. + required: true + type: string + get: + tags: + - Stream Upstreams + - Method GET + summary: Return configuration of a server in a stream upstream server group + description: Returns configuration of a particular server + in the stream upstream server group. + operationId: getStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*), + server with ID “**id**” does not exist (*UpstreamServerNotFound*) + schema: + $ref: '#/definitions/NginxError' + patch: + tags: + - Stream Upstreams + - Method PATCH + summary: Modify a server in a stream upstream server group + description: Modifies settings of a particular server + in a stream upstream server group. + Server parameters are specified in the JSON format. + operationId: patchStreamUpstreamServer + produces: + - application/json + parameters: + - in: body + name: patchStreamUpstreamServer + description: Server parameters, specified in the JSON format. + The “*ID*”, “*backup*”, and “*service*” parameters + cannot be changed. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxStreamUpstreamConfServer'
+ responses:
+ '200':
+ description: Success
+ schema:
+ $ref: '#/definitions/NginxStreamUpstreamConfServer'
+ '400':
+ description: |
+ Upstream is static (*UpstreamStatic*),
+ invalid “**parameter**” value (*UpstreamConfFormatError*),
+ unknown parameter “**name**” (*UpstreamConfFormatError*),
+ nested object or list (*UpstreamConfFormatError*),
+ “*error*” while parsing (*UpstreamBadAddress*),
+ invalid “*server*” argument (*UpstreamBadAddress*),
+ no port in server “*host*” (*UpstreamBadAddress*),
+ invalid server ID (*UpstreamBadServerId*),
+ invalid “*weight*” (*UpstreamBadWeight*),
+ invalid “*max_conns*” (*UpstreamBadMaxConns*),
+ invalid “*max_fails*” (*UpstreamBadMaxFails*),
+ invalid “*fail_timeout*” (*UpstreamBadFailTimeout*),
+ invalid “*slow_start*” (*UpstreamBadSlowStart*),
+ reading request body failed (*BodyReadError*),
+ “*service*” is empty (*UpstreamBadService*),
+ server “**ID**” address is immutable (*UpstreamServerImmutable*),
+ server “**ID**” weight is immutable (*UpstreamServerWeightImmutable*),
+ upstream “*name*” memory exhausted (*UpstreamOutOfMemory*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Server with ID “**id**” does not exist (*UpstreamServerNotFound*),
+ unknown version (*UnknownVersion*),
+ upstream not found (*UpstreamNotFound*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - Stream Upstreams
+ - Method DELETE
+ summary: Remove a server from a stream upstream server group
+ description: Removes a server from a stream server group. 
+ operationId: deleteStreamUpstreamServer + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamUpstreamConfServerMap' + '400': + description: | + Upstream is static (*UpstreamStatic*), + invalid server ID (*UpstreamBadServerId*), + server “**id**” not removable (*UpstreamServerImmutable*) + schema: + $ref: '#/definitions/NginxError' + '404': + description: | + Server with ID “**id**” does not exist (*UpstreamServerNotFound*), + unknown version (*UnknownVersion*), + upstream not found (*UpstreamNotFound*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /stream/keyvals/: + get: + tags: + - Stream Keyvals + - Method GET + summary: Return key-value pairs from all stream keyval zones + description: Returns key-value pairs for each stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + operationId: getStreamKeyvalZones + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: If the “fields” value is empty, + then only stream keyval zone names will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamKeyvalZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '/stream/keyvals/{streamKeyvalZoneName}': + parameters: + - name: streamKeyvalZoneName + in: path + description: The name of a stream keyval shared memory zone. + required: true + type: string + get: + tags: + - Stream Keyvals + - Method GET + summary: Return key-value pairs from a stream keyval zone + description: Returns key-value pairs stored in a particular stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). 
+ operationId: getStreamKeyvalZoneKeysValues + produces: + - application/json + parameters: + - name: key + in: query + type: string + description: Get a particular key-value pair from the stream keyval zone. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamKeyvalZone' + '404': + description: | + Keyval not found (*KeyvalNotFound*), + keyval key not found (*KeyvalKeyNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + post: + tags: + - Stream Keyvals + - Method POST + summary: Add a key-value pair to the stream keyval zone + description: Adds a new key-value pair to the stream keyval shared memory + [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone). + Several key-value pairs can be entered + if the stream keyval shared memory zone is empty. + operationId: postStreamKeyvalZoneData + produces: + - application/json + parameters: + - in: body + name: Key-value + description: A key-value pair is specified in the JSON format. + Several key-value pairs can be entered + if the stream keyval shared memory zone is empty. + Expiration time in milliseconds can be specified for a key-value pair + with the *expire* parameter + which overrides the [*timeout*](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_timeout) + parameter of the + keyval_zone + directive. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxStreamKeyvalZonePostPatch'
+ responses:
+ '201':
+ description: Created
+ '400':
+ description: |
+ Invalid JSON (*KeyvalFormatError*),
+ invalid key format (*KeyvalFormatError*),
+ key required (*KeyvalFormatError*),
+ keyval timeout is not enabled (*KeyvalFormatError*),
+ only one key can be added (*KeyvalFormatError*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '409':
+ description: |
+ Entry exists (*EntryExists*),
+ key already exists (*KeyvalKeyExists*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '413':
+ description: Request Entity Too Large
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ patch:
+ tags:
+ - Stream Keyvals
+ - Method PATCH
+ summary: Modify a key-value or delete a key
+ description: Changes the value of the selected key in the key-value pair,
+ deletes a key by setting the key value to null,
+ changes expiration time of a key-value pair.
+ If
+ synchronization
+ of keyval zones in a cluster is enabled,
+ deletes a key only on a target cluster node.
+ Expiration time is specified in milliseconds
+ with the *expire* parameter
+ which overrides the [*timeout*](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_timeout)
+ parameter of the
+ keyval_zone
+ directive.
+ operationId: patchStreamKeyvalZoneKeyValue
+ produces:
+ - application/json
+ parameters:
+ - in: body
+ name: streamKeyvalZoneKeyValue
+ description: A new value for the key is specified in the JSON format. 
+ required: true
+ schema:
+ $ref: '#/definitions/NginxStreamKeyvalZonePostPatch'
+ responses:
+ '204':
+ description: Success
+ '400':
+ description: |
+ Invalid JSON (*KeyvalFormatError*),
+ key required (*KeyvalFormatError*),
+ keyval timeout is not enabled (*KeyvalFormatError*),
+ only one key can be updated (*KeyvalFormatError*),
+ reading request body failed (*BodyReadError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ keyval key not found (*KeyvalKeyNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '413':
+ description: Request Entity Too Large
+ schema:
+ $ref: '#/definitions/NginxError'
+ '415':
+ description: JSON error (*JsonError*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ delete:
+ tags:
+ - Stream Keyvals
+ - Method DELETE
+ summary: Empty the stream keyval zone
+ description: Deletes all key-value pairs from the stream keyval shared memory
+ [zone](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone).
+ If
+ synchronization
+ of keyval zones in a cluster is enabled,
+ empties the keyval zone only on a target cluster node.
+ operationId: deleteStreamKeyvalZoneData
+ responses:
+ '204':
+ description: Success
+ '404':
+ description: |
+ Keyval not found (*KeyvalNotFound*),
+ unknown version (*UnknownVersion*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ '405':
+ description: Method disabled (*MethodDisabled*)
+ schema:
+ $ref: '#/definitions/NginxError'
+ /stream/zone_sync/:
+ get:
+ tags:
+ - Stream Zone Sync
+ - Method GET
+ summary: Return sync status of a node
+ description: Returns synchronization status of a cluster node. 
+ operationId: getStreamZoneSync + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxStreamZoneSync' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + /resolvers/: + get: + tags: + - Resolvers + - Method GET + summary: Return status for all resolver zones + description: Returns status information for each + [resolver zone](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver_status_zone). + operationId: getResolverZones + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxResolverZonesMap' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of resolvers statistics will be output. + '/resolvers/{resolverZoneName}': + parameters: + - name: resolverZoneName + in: path + description: The name of a resolver zone. + required: true + type: string + get: + tags: + - Resolvers + - Method GET + summary: Return statistics of a resolver zone + description: Returns statistics stored in a particular resolver + [zone](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver_status_zone). + operationId: getResolverZone + produces: + - application/json + parameters: + - name: fields + in: query + type: string + description: Limits which fields of the resolver zone will be output + (requests, responses, or both). + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxResolverZone' + '404': + description: | + Resolver zone not found (*ResolverZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Resolvers + - Method DELETE + summary: Reset statistics for a resolver zone. + description: Resets statistics in a particular resolver zone. 
+ operationId: deleteResolverZoneStat + responses: + '204': + description: Success + '404': + description: | + Resolver zone not found (*ResolverZoneNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /ssl: + get: + tags: + - SSL + - Method GET + summary: Return SSL statistics + description: Returns SSL statistics. + operationId: getSsl + produces: + - application/json + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxSSLObject' + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + parameters: + - in: query + name: fields + type: string + description: Limits which fields of SSL statistics will be output. + delete: + tags: + - SSL + - Method DELETE + summary: Reset SSL statistics + description: Resets counters of SSL handshakes and session reuses. + operationId: deleteSslStat + responses: + '204': + description: Success + '404': + description: Unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + /workers/: + get: + tags: + - Workers + - Method GET + summary: Return statistics for all worker processes + description: | + Returns statistics for all worker processes such as + accepted, dropped, active, idle connections, total and current requests. + operationId: getWorkers + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of worker process statistics will be output. 
+ responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxWorkersMap' + '404': + description: | + Worker not found (*WorkerNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Workers + - Method DELETE + summary: Reset statistics for all worker processes. + description: | + Resets statistics for all worker processes such as + accepted, dropped, active, idle connections, total and current requests. + operationId: deleteWorkerStat + produces: + - application/json + responses: + '204': + description: Success + '404': + description: | + Worker not found (*WorkerNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' + '/workers/{workerId}': + parameters: + - name: workerId + in: path + description: The ID of the worker process. + required: true + type: string + get: + tags: + - Workers + - Method GET + summary: Return status of a worker process + description: Returns status of a particular worker process. + operationId: getWorker + produces: + - application/json + parameters: + - in: query + name: fields + type: string + description: Limits which fields of worker process statistics will be output. + responses: + '200': + description: Success + schema: + $ref: '#/definitions/NginxWorker' + '404': + description: | + Worker not found (*WorkerNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + delete: + tags: + - Workers + - Method DELETE + summary: Reset statistics for a worker process. + description: | + Resets statistics of accepted, dropped, active, idle connections, + as well as total and current requests. 
+ operationId: deleteWorkerIdStat + produces: + - application/json + responses: + '204': + description: Success + '404': + description: | + Worker not found (*WorkerNotFound*), + unknown version (*UnknownVersion*) + schema: + $ref: '#/definitions/NginxError' + '405': + description: Method disabled (*MethodDisabled*) + schema: + $ref: '#/definitions/NginxError' +### +###DEFINITIONS +### +definitions: + ArrayOfStrings: + title: Array + description: | + An array of strings. + type: array + items: + type: string + NginxObject: + title: nginx + description: | + General information about nginx: + type: object + properties: + version: + type: string + description: Version of nginx. + build: + type: string + description: Name of nginx build. + address: + type: string + description: The address of the server that accepted status request. + generation: + type: integer + description: The total number of configuration + reloads. + load_timestamp: + type: string + format: date-time + description: Time of the last reload of configuration, + in the ISO 8601 format with millisecond resolution. + timestamp: + type: string + format: date-time + description: Current time + in the ISO 8601 format with millisecond resolution. + pid: + type: integer + description: The ID of the worker process that handled status request. + ppid: + type: integer + description: The ID of the master process that started the + worker process. + example: + nginx: + version: 1.21.6 + build: nginx-plus-r27 + address: 206.251.255.64 + generation: 6 + load_timestamp: 2022-06-28T11:15:44.467Z + timestamp: 2022-06-28T09:26:07.305Z + pid: 32212 + ppid: 32210 + NginxProcesses: + title: Processes + type: object + properties: + respawned: + type: integer + description: The total number of abnormally terminated + and respawned child processes. + example: + respawned: 0 + NginxConnections: + title: Connections + description: | + The number of accepted, dropped, active, and idle connections. 
+ type: object + properties: + accepted: + type: integer + description: The total number of accepted client connections. + dropped: + type: integer + description: The total number of dropped client connections. + active: + type: integer + description: The current number of active client connections. + idle: + type: integer + description: The current number of idle client connections. + example: + accepted: 4968119 + dropped: 0 + active: 5 + idle: 117 + NginxSSLObject: + title: SSL + type: object + properties: + handshakes: + type: integer + description: The total number of successful SSL handshakes. + handshakes_failed: + type: integer + description: The total number of failed SSL handshakes. + session_reuses: + type: integer + description: The total number of session reuses during SSL handshake. + no_common_protocol: + type: integer + description: The number of SSL handshakes failed + because of no common protocol. + no_common_cipher: + type: integer + description: The number of SSL handshakes failed + because of no shared cipher. + handshake_timeout: + type: integer + description: The number of SSL handshakes failed + because of a timeout. + peer_rejected_cert: + type: integer + description: The number of failed SSL handshakes + when nginx presented the certificate to the client + but it was rejected with a corresponding alert message. + verify_failures: + type: object + description: SSL certificate verification errors + properties: + no_cert: + type: integer + description: A client did not provide the required certificate. + expired_cert: + type: integer + description: An expired or not yet valid certificate + was presented by a client. + revoked_cert: + type: integer + description: A revoked certificate was presented by a client. + hostname_mismatch: + type: integer + description: Server's certificate doesn't match the hostname. + other: + type: integer + description: Other SSL certificate verification errors. 
+ example: + handshakes: 79572 + handshakes_failed: 21025 + session_reuses: 15762 + no_common_protocol: 4 + no_common_cipher: 2 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + no_cert: 0 + expired_cert: 2 + revoked_cert: 1 + hostname_mismatch: 2 + other: 1 + NginxSlabZoneMap: + title: Slab Zones + description: | + Status zones that use slab allocator. + type: object + additionalProperties: + $ref: '#/definitions/NginxSlabZone' + example: + http_cache: + pages: + used: 2 + free: 2452 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 1 + free: 126 + reqs: 1 + fails: 0 + 64: + used: 2 + free: 62 + reqs: 2 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + dns-backends: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZone: + title: Shared memory zone with slab allocator + description: | + Shared memory zone with slab allocator + type: object + properties: + pages: + type: object + description: The number of free and used memory pages. + properties: + used: + type: integer + description: The current number of used memory pages. + free: + type: integer + description: The current number of free memory pages. + slots: + type: object + title: Memory Slots + description: Status data for memory slots (8, 16, 32, 64, 128, etc.) 
+ additionalProperties: + $ref: '#/definitions/NginxSlabZoneSlot' + example: + pages: + used: 1143 + free: 2928 + slots: + 8: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 16: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 32: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 64: + used: 1 + free: 63 + reqs: 1 + fails: 0 + 128: + used: 0 + free: 0 + reqs: 0 + fails: 0 + 256: + used: 18078 + free: 178 + reqs: 1635736 + fails: 0 + NginxSlabZoneSlot: + title: Memory Slot + type: object + properties: + used: + type: integer + description: The current number of used memory slots. + free: + type: integer + description: The current number of free memory slots. + reqs: + type: integer + description: The total number of attempts + to allocate memory of specified size. + fails: + type: integer + description: The number of unsuccessful attempts + to allocate memory of specified size. + NginxHTTPRequests: + title: HTTP Requests + type: object + properties: + total: + type: integer + description: The total number of client requests. + current: + type: integer + description: The current number of client requests. + example: + total: 10624511 + current: 4 + NginxHTTPServerZonesMap: + title: HTTP Server Zones + description: | + Status data for all HTTP + status zones. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPServerZone' + example: + site1: + processing: 2 + requests: 736395 + responses: + 1xx: 0 + 2xx: 727290 + 3xx: 4614 + 4xx: 934 + 5xx: 1535 + codes: + 200: 727270 + 301: 4614 + 404: 930 + 503: 1535 + total: 734373 + discarded: 2020 + received: 180157219 + sent: 20183175459 + ssl: + handshakes: 65432 + handshakes_failed: 421 + session_reuses: 4645 + no_common_protocol: 4 + no_common_cipher: 2 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + no_cert: 0 + expired_cert: 2 + revoked_cert: 1 + hostname_mismatch: 2 + other: 1 + site2: + processing: 1 + requests: 185307 + responses: + 1xx: 0 + 2xx: 112674 + 3xx: 45383 + 4xx: 2504 + 5xx: 4419 + codes: + 200: 112674 + 301: 45383 + 404: 2504 + 503: 4419 + total: 164980 + discarded: 20326 + received: 51575327 + sent: 2983241510 + ssl: + handshakes: 104303 + handshakes_failed: 1421 + session_reuses: 54645 + no_common_protocol: 4 + no_common_cipher: 2 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + no_cert: 0 + expired_cert: 2 + revoked_cert: 1 + hostname_mismatch: 2 + other: 1 + NginxHTTPServerZone: + title: HTTP Server Zone + type: object + properties: + processing: + type: integer + description: The number of client requests + that are currently being processed. + requests: + type: integer + description: The total number of client requests received from clients. + responses: + description: The total number of responses sent to clients, the + number of responses with status codes + “1xx”, “2xx”, “3xx”, + “4xx”, and “5xx”, and + the number of responses per each status code. + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. 
+ readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. + readOnly: true + codes: + type: object + description: The number of responses per each status code. + readOnly: true + properties: + codeNumber: + type: integer + description: The number of responses with this particular status code. + readOnly: true + total: + type: integer + description: The total number of responses sent to clients. + readOnly: true + discarded: + type: integer + description: The total number of + requests completed without sending a response. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + ssl: + type: object + readOnly: true + properties: + handshakes: + type: integer + description: The total number of successful SSL handshakes. + readOnly: true + handshakes_failed: + type: integer + description: The total number of failed SSL handshakes. + readOnly: true + session_reuses: + type: integer + description: The total number of session reuses during SSL handshake. + readOnly: true + no_common_protocol: + type: integer + description: The number of SSL handshakes failed + because of no common protocol. + no_common_cipher: + type: integer + description: The number of SSL handshakes failed + because of no shared cipher. + handshake_timeout: + type: integer + description: The number of SSL handshakes failed + because of a timeout. + peer_rejected_cert: + type: integer + description: The number of failed SSL handshakes + when nginx presented the certificate to the client + but it was rejected with a corresponding alert message. + verify_failures: + type: object + description: SSL certificate verification errors + properties: + no_cert: + type: integer + description: A client did not provide the required certificate. 
+ expired_cert: + type: integer + description: An expired or not yet valid certificate + was presented by a client. + revoked_cert: + type: integer + description: A revoked certificate was presented by a client. + other: + type: integer + description: Other SSL certificate verification errors. + example: + processing: 1 + requests: 706690 + responses: + 1xx: 0 + 2xx: 699482 + 3xx: 4522 + 4xx: 907 + 5xx: 266 + codes: + 200: 699482 + 301: 4522 + 404: 907 + 503: 266 + total: 705177 + discarded: 1513 + received: 172711587 + sent: 19415530115 + ssl: + handshakes: 104303 + handshakes_failed: 1421 + session_reuses: 54645 + no_common_protocol: 4 + no_common_cipher: 2 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + no_cert: 0 + expired_cert: 2 + revoked_cert: 1 + other: 1 + NginxHTTPLocationZonesMap: + title: HTTP Location Zones + description: | + Status data for all HTTP + location zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPLocationZone' + example: + site1: + requests: 736395 + responses: + 1xx: 0 + 2xx: 727290 + 3xx: 4614 + 4xx: 934 + 5xx: 1535 + codes: + 200: 727290 + 301: 4614 + 404: 934 + 503: 1535 + total: 734373 + discarded: 2020 + received: 180157219 + sent: 20183175459 + site2: + requests: 185307 + responses: + 1xx: 0 + 2xx: 112674 + 3xx: 45383 + 4xx: 2504 + 5xx: 4419 + codes: + 200: 112674 + 301: 45383 + 404: 2504 + 503: 4419 + total: 164980 + discarded: 20326 + received: 51575327 + sent: 2983241510 + NginxHTTPLocationZone: + title: HTTP Location Zone + type: object + properties: + requests: + type: integer + description: The total number of client requests received from clients. + responses: + description: The total number of responses sent to clients, the + number of responses with status codes + “1xx”, “2xx”, “3xx”, + “4xx”, and “5xx”, and + the number of responses per each status code. 
+ type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. + readOnly: true + codes: + type: object + description: The number of responses per each status code. + readOnly: true + properties: + codeNumber: + type: integer + description: The number of responses with this particular status code. + readOnly: true + total: + type: integer + description: The total number of responses sent to clients. + readOnly: true + discarded: + type: integer + description: The total number of + requests completed without sending a response. + received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + example: + requests: 706690 + responses: + 1xx: 0 + 2xx: 699482 + 3xx: 4522 + 4xx: 907 + 5xx: 266 + codes: + 200: 112674 + 301: 4522 + 404: 2504 + 503: 266 + total: 705177 + discarded: 1513 + received: 172711587 + sent: 19415530115 + NginxHTTPCachesMap: + title: HTTP Caches + description: | + Status information of all HTTP caches configured by + proxy_cache_path + and other “*_cache_path” directives. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPCache' + example: + http-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + frontend-cache: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + NginxHTTPCache: + title: HTTP Cache + type: object + properties: + size: + type: integer + description: The current size of the cache. + max_size: + type: integer + description: The limit on the maximum size of the cache + specified in the configuration. + cold: + type: boolean + description: A boolean value indicating whether the “cache loader” process + is still loading data from disk into the cache. + hit: + type: object + properties: + responses: + type: integer + description: The total number of + valid + responses read from the cache. + bytes: + type: integer + description: The total number of bytes read from the cache. + stale: + type: object + properties: + responses: + type: integer + description: The total number of expired responses read from the cache (see + proxy_cache_use_stale + and other “*_cache_use_stale” directives). + bytes: + type: integer + description: The total number of bytes read from the cache. 
+      updating:
+        type: object
+        properties:
+          responses:
+            type: integer
+            description: The total number of expired responses read from the cache
+              while responses were being updated (see
+              proxy_cache_use_stale
+              and other “*_cache_use_stale” directives).
+          bytes:
+            type: integer
+            description: The total number of bytes read from the cache.
+      revalidated:
+        type: object
+        properties:
+          responses:
+            type: integer
+            description: The total number of expired and revalidated responses
+              read from the cache (see
+              proxy_cache_revalidate
+              and other “*_cache_revalidate” directives).
+          bytes:
+            type: integer
+            description: The total number of bytes read from the cache.
+      miss:
+        type: object
+        properties:
+          responses:
+            type: integer
+            description: The total number of responses not found in the cache.
+          bytes:
+            type: integer
+            description: The total number of bytes read from the proxied server.
+          responses_written:
+            type: integer
+            description: The total number of responses written to the cache.
+          bytes_written:
+            type: integer
+            description: The total number of bytes written to the cache.
+      expired:
+        type: object
+        properties:
+          responses:
+            type: integer
+            description: The total number of expired responses not taken from the cache.
+          bytes:
+            type: integer
+            description: The total number of bytes read from the proxied server.
+          responses_written:
+            type: integer
+            description: The total number of responses written to the cache.
+          bytes_written:
+            type: integer
+            description: The total number of bytes written to the cache.
+      bypass:
+        type: object
+        properties:
+          responses:
+            type: integer
+            description: The total number of responses not looked up in the cache due to the
+              proxy_cache_bypass
+              and other “*_cache_bypass” directives.
+          bytes:
+            type: integer
+            description: The total number of bytes read from the proxied server.
+          responses_written:
+            type: integer
+            description: The total number of responses written to the cache.
+ bytes_written: + type: integer + description: The total number of bytes written to the cache. + example: + size: 530915328 + max_size: 536870912 + cold: false + hit: + responses: 254032 + bytes: 6685627875 + stale: + responses: 0 + bytes: 0 + updating: + responses: 0 + bytes: 0 + revalidated: + responses: 0 + bytes: 0 + miss: + responses: 1619201 + bytes: 53841943822 + expired: + responses: 45859 + bytes: 1656847080 + responses_written: 44992 + bytes_written: 1641825173 + bypass: + responses: 200187 + bytes: 5510647548 + responses_written: 200173 + bytes_written: 44992 + NginxHTTPLimitConnZonesMap: + title: HTTP limit_conns + description: | + Status data for all HTTP + limit_conn zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPLimitConnZone' + NginxHTTPLimitConnZone: + title: HTTP Connections Limiting + type: object + properties: + passed: + type: integer + description: The total number of connections that were neither limited + nor accounted as limited. + rejected: + type: integer + description: The total number of connections that were rejected. + rejected_dry_run: + type: integer + description: The total number of connections accounted as rejected in the + dry run + mode. + example: + passed: 15 + rejected: 0 + rejected_dry_run: 2 + NginxHTTPLimitReqZonesMap: + title: HTTP limit_reqs + description: | + Status data for all HTTP + limit_req zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPLimitReqZone' + NginxHTTPLimitReqZone: + title: HTTP Requests Rate Limiting + type: object + properties: + passed: + type: integer + description: The total number of requests that were neither limited + nor accounted as limited. + delayed: + type: integer + description: The total number of requests that were delayed. + rejected: + type: integer + description: The total number of requests that were rejected. 
+ delayed_dry_run: + type: integer + description: The total number of requests accounted as delayed in the + dry run + mode. + rejected_dry_run: + type: integer + description: The total number of requests accounted as rejected in the + dry run + mode. + example: + passed: 15 + delayed: 4 + rejected: 0 + delayed_dry_run: 1 + rejected_dry_run: 2 + NginxHTTPUpstreamMap: + title: HTTP Upstreams + description: | + Status information of all HTTP + dynamically configurable + groups. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPUpstream' + example: + trac-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + ssl: + handshakes: 620311 + handshakes_failed: 3432 + session_reuses: 36442 + no_common_protocol: 4 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + expired_cert: 2 + revoked_cert: 1 + hostname_mismatch: 2 + other: 1 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + codes: + 200: 666310 + 404: 915 + 503: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + codes: {} + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + keepalive: 0 + zombies: 0 + zone: trac-backend + hg-backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + ssl: + handshakes: 620311 + 
handshakes_failed: 3432 + session_reuses: 36442 + no_common_protocol: 4 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + expired_cert: 2 + revoked_cert: 1 + hostname_mismatch: 2 + other: 1 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + codes: + 200: 666310 + 404: 915 + 503: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + codes: {} + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + keepalive: 0 + zombies: 0 + zone: hg-backend + NginxHTTPUpstream: + title: HTTP Upstream + type: object + properties: + peers: + $ref: '#/definitions/NginxHTTPUpstreamPeerMap' + keepalive: + type: integer + description: The current number of idle + keepalive + connections. + zombies: + type: integer + description: The current number of servers removed + from the group but still processing active client requests. + zone: + type: string + description: The name of the shared memory + zone + that keeps the group’s configuration and run-time state. + queue: + type: object + description: > + For the requests + queue, + the following data are provided: + properties: + size: + type: integer + description: The current number of requests in the queue. + max_size: + type: integer + description: The maximum number of requests that can be in the queue + at the same time. 
+ overflows: + type: integer + description: The total number of requests rejected due to the queue overflow. + example: + upstream_backend: + peers: + - id: 0 + server: 10.0.0.1:8088 + name: 10.0.0.1:8088 + backup: false + weight: 5 + state: up + active: 0 + ssl: + handshakes: 620311 + handshakes_failed: 3432 + session_reuses: 36442 + no_common_protocol: 4 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + expired_cert: 2 + revoked_cert: 1 + hostname_mismatch: 2 + other: 1 + max_conns: 20 + requests: 667231 + header_time: 20 + response_time: 36 + responses: + 1xx: 0 + 2xx: 666310 + 3xx: 0 + 4xx: 915 + 5xx: 6 + codes: + 200: 666310 + 404: 915 + 503: 6 + total: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + - id: 1 + server: 10.0.0.1:8089 + name: 10.0.0.1:8089 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 20 + requests: 0 + responses: + 1xx: 0 + 2xx: 0 + 3xx: 0 + 4xx: 0 + 5xx: 0 + codes: {} + total: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + keepalive: 0 + zombies: 0 + zone: upstream_backend + NginxHTTPUpstreamPeerMap: + title: HTTP Upstream Servers + description: | + An array of HTTP + upstream servers. + type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamPeer' + NginxHTTPUpstreamPeer: + title: HTTP Upstream Server + type: object + properties: + id: + type: integer + description: The ID of the server. + readOnly: true + server: + type: string + description: An address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. 
+ name: + type: string + description: The name of the server specified in the + server + directive. + readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. + state: + type: string + enum: + - up + - draining + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “draining”, “down”, + “unavail”, “checking”, + and “unhealthy”. + active: + type: integer + description: The current number of active connections. + readOnly: true + ssl: + type: object + readOnly: true + properties: + handshakes: + type: integer + description: The total number of successful SSL handshakes. + readOnly: true + handshakes_failed: + type: integer + description: The total number of failed SSL handshakes. + readOnly: true + session_reuses: + type: integer + description: The total number of session reuses during SSL handshake. + readOnly: true + no_common_protocol: + type: integer + description: The number of SSL handshakes failed + because of no common protocol. + handshake_timeout: + type: integer + description: The number of SSL handshakes failed + because of a timeout. + peer_rejected_cert: + type: integer + description: The number of failed SSL handshakes + when nginx presented the certificate to the upstream server + but it was rejected with a corresponding alert message. + verify_failures: + type: object + description: SSL certificate verification errors + properties: + expired_cert: + type: integer + description: An expired or not yet valid certificate + was presented by an upstream server. + revoked_cert: + type: integer + description: A revoked certificate was presented by an upstream server. + hostname_mismatch: + type: integer + description: Server's certificate doesn't match the hostname. + other: + type: integer + description: Other SSL certificate verification errors. 
+ max_conns: + type: integer + description: The + max_conns + limit for the server. + requests: + type: integer + description: The total number of client requests forwarded to this server. + readOnly: true + responses: + type: object + readOnly: true + properties: + 1xx: + type: integer + description: The number of responses with “1xx” status codes. + readOnly: true + 2xx: + type: integer + description: The number of responses with “2xx” status codes. + readOnly: true + 3xx: + type: integer + description: The number of responses with “3xx” status codes. + readOnly: true + 4xx: + type: integer + description: The number of responses with “4xx” status codes. + readOnly: true + 5xx: + type: integer + description: The number of responses with “5xx” status codes. + readOnly: true + codes: + type: object + description: The number of responses per each status code. + readOnly: true + properties: + codeNumber: + type: integer + description: The number of responses with this particular status code. + readOnly: true + total: + type: integer + description: The total number of responses obtained from this server. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. + readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client requests + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. + readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. + fails: + type: integer + description: The number of failed health checks. 
+ unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). + last_passed: + type: boolean + description: Boolean indicating if the last health check request was successful + and passed + tests. + downtime: + type: integer + readOnly: true + description: Total time the server was in the “unavail”, + “checking”, and “unhealthy” states. + downstart: + type: string + format: date-time + readOnly: true + description: The time when the server became + “unavail”, “checking”, + or “unhealthy”, + in the ISO 8601 format with millisecond resolution. + selected: + type: string + format: date-time + readOnly: true + description: The time when the server was last selected to process a request, + in the ISO 8601 format with millisecond resolution. + header_time: + type: integer + readOnly: true + description: The average time to get the + response header + from the server. + response_time: + type: integer + readOnly: true + description: The average time to get the + full response + from the server. + NginxHTTPUpstreamConfServerMap: + title: HTTP Upstream Servers + description: An array of HTTP upstream servers for dynamic configuration. + type: array + items: + $ref: '#/definitions/NginxHTTPUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:8088 + weight: 1 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: false + down: false + - id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPUpstreamConfServer: + title: HTTP Upstream Server + description: | + Dynamically configurable parameters of an HTTP upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the HTTP upstream server. + The ID is assigned automatically and cannot be changed. 
+ readOnly: true + server: + type: string + description: Same as the + address + parameter of the HTTP upstream server. + When adding a server, it is possible to specify it as a domain name. + In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “http” block. + See also the + resolve + parameter of the HTTP upstream server. + service: + type: string + description: Same as the + service + parameter of the HTTP upstream server. + This parameter cannot be changed. + readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the HTTP upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the HTTP upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the HTTP upstream server. + fail_timeout: + type: string + description: Same as the + fail_timeout + parameter of the HTTP upstream server. + slow_start: + type: string + description: Same as the + slow_start + parameter of the HTTP upstream server. + route: + type: string + description: Same as the + route + parameter of the HTTP upstream server. + backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the HTTP upstream server. + drain: + type: boolean + description: Same as the + drain + parameter of the HTTP upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. 
+ readOnly: true + example: + id: 1 + server: 10.0.0.1:8089 + weight: 4 + max_conns: 0 + max_fails: 0 + fail_timeout: 10s + slow_start: 10s + route: '' + backup: true + down: true + NginxHTTPKeyvalZonesMap: + title: HTTP Keyval Shared Memory Zones + description: | + Contents of all HTTP keyval shared memory zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxHTTPKeyvalZone' + example: + keyval_zone: + key1: value1 + key2: value2 + key3: value3 + one: + arg1: value1 + arg2: value2 + arg3: value3 + NginxHTTPKeyvalZone: + title: HTTP Keyval Shared Memory Zone + description: | + Contents of an HTTP keyval shared memory zone + when using the GET method. + type: object + example: + key1: value1 + key2: value2 + key3: value3 + NginxHTTPKeyvalZonePostPatch: + title: HTTP Keyval Shared Memory Zone + description: | + Contents of an HTTP keyval shared memory zone + when using the POST or PATCH methods. + type: object + example: + key1: value1 + key2: value2 + key3: + value: value3 + expire: 30000 + NginxStreamServerZonesMap: + title: Stream Server Zones + description: | + Status information for all stream + status zones. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxStreamServerZone' + example: + mysql-frontend: + processing: 2 + connections: 270925 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 270925 + discarded: 0 + received: 28988975 + sent: 3879346317 + ssl: + handshakes: 76455 + handshakes_failed: 432 + session_reuses: 28770 + no_common_protocol: 4 + no_common_cipher: 2 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + no_cert: 0 + expired_cert: 2 + revoked_cert: 1 + other: 1 + dns: + processing: 1 + connections: 155569 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 155569 + discarded: 0 + received: 4200363 + sent: 20489184 + ssl: + handshakes: 2040 + handshakes_failed: 23 + session_reuses: 65 + no_common_protocol: 4 + no_common_cipher: 2 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + no_cert: 0 + expired_cert: 2 + revoked_cert: 1 + other: 1 + NginxStreamServerZone: + title: Stream Server Zone + type: object + properties: + processing: + type: integer + description: The number of client connections + that are currently being processed. + connections: + type: integer + description: The total number of connections accepted from clients. + sessions: + type: object + description: The total number of completed sessions, + and the number of sessions completed with status codes + “2xx”, “4xx”, or “5xx”. + properties: + 2xx: + type: integer + description: The total number of sessions completed with + status codes + “2xx”. + 4xx: + type: integer + description: The total number of sessions completed with + status codes + “4xx”. + 5xx: + type: integer + description: The total number of sessions completed with + status codes + “5xx”. + total: + type: integer + description: The total number of completed client sessions. + discarded: + type: integer + description: The total number of + connections completed without creating a session. 
+ received: + type: integer + description: The total number of bytes received from clients. + sent: + type: integer + description: The total number of bytes sent to clients. + ssl: + type: object + readOnly: true + properties: + handshakes: + type: integer + description: The total number of successful SSL handshakes. + readOnly: true + handshakes_failed: + type: integer + description: The total number of failed SSL handshakes. + readOnly: true + session_reuses: + type: integer + description: The total number of session reuses during SSL handshake. + readOnly: true + no_common_protocol: + type: integer + description: The number of SSL handshakes failed + because of no common protocol. + no_common_cipher: + type: integer + description: The number of SSL handshakes failed + because of no shared cipher. + handshake_timeout: + type: integer + description: The number of SSL handshakes failed + because of a timeout. + peer_rejected_cert: + type: integer + description: The number of failed SSL handshakes + when nginx presented the certificate to the client + but it was rejected with a corresponding alert message. + verify_failures: + type: object + description: SSL certificate verification errors + properties: + no_cert: + type: integer + description: A client did not provide the required certificate. + expired_cert: + type: integer + description: An expired or not yet valid certificate + was presented by a client. + revoked_cert: + type: integer + description: A revoked certificate was presented by a client. + other: + type: integer + description: Other SSL certificate verification errors. 
+ example: + dns: + processing: 1 + connections: 155569 + sessions: + 2xx: 155564 + 4xx: 0 + 5xx: 0 + total: 155569 + discarded: 0 + received: 4200363 + sent: 20489184 + ssl: + handshakes: 76455 + handshakes_failed: 432 + session_reuses: 28770 + no_common_protocol: 4 + no_common_cipher: 2 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + no_cert: 0 + expired_cert: 2 + revoked_cert: 1 + other: 1 + NginxStreamLimitConnZonesMap: + title: Stream limit_conns + description: | + Status data for all stream + limit_conn zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxStreamLimitConnZone' + NginxStreamLimitConnZone: + title: Stream Connections Limiting + type: object + properties: + passed: + type: integer + description: The total number of connections that were neither limited + nor accounted as limited. + rejected: + type: integer + description: The total number of connections that were rejected. + rejected_dry_run: + type: integer + description: The total number of connections accounted as rejected in the + dry run + mode. + example: + passed: 15 + rejected: 0 + rejected_dry_run: 2 + NginxStreamUpstreamMap: + title: Stream Upstreams + description: Status information of stream upstream server groups. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxStreamUpstream' + example: + mysql_backends: + peers: + - id: 0 + server: 10.0.0.1:12345 + name: 10.0.0.1:12345 + backup: false + weight: 5 + state: up + active: 0 + ssl: + handshakes: 1045 + handshakes_failed: 89 + session_reuses: 321 + no_common_protocol: 4 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + expired_cert: 2 + revoked_cert: 1 + hostname_mismatch: 2 + other: 1 + max_conns: 30 + connections: 1231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + - id: 1 + server: 10.0.0.1:12346 + name: 10.0.0.1:12346 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 30 + connections: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + zombies: 0 + zone: mysql_backends + dns: + peers: + - id: 0 + server: 10.0.0.1:12347 + name: 10.0.0.1:12347 + backup: false + weight: 5 + state: up + active: 0 + ssl: + handshakes: 5268 + handshakes_failed: 121 + session_reuses: 854 + no_common_protocol: 4 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + expired_cert: 2 + revoked_cert: 1 + hostname_mismatch: 2 + other: 1 + max_conns: 30 + connections: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + - id: 1 + server: 10.0.0.1:12348 + name: 10.0.0.1:12348 + backup: true + weight: 1 + state: unhealthy + active: 0 + connections: 0 + max_conns: 30 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + 
fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + zombies: 0 + zone: dns + NginxStreamUpstream: + title: Stream Upstream + type: object + properties: + peers: + $ref: '#/definitions/NginxStreamUpstreamPeerMap' + zombies: + type: integer + description: The current number of servers removed from the group + but still processing active client connections. + zone: + type: string + description: The name of the shared memory + zone + that keeps the group’s configuration and run-time state. + example: + dns: + peers: + - id: 0 + server: 10.0.0.1:12347 + name: 10.0.0.1:12347 + backup: false + weight: 5 + state: up + active: 0 + ssl: + handshakes: 200 + handshakes_failed: 4 + session_reuses: 189 + no_common_protocol: 4 + handshake_timeout: 0 + peer_rejected_cert: 0 + verify_failures: + expired_cert: 2 + revoked_cert: 1 + hostname_mismatch: 2 + other: 1 + max_conns: 50 + connections: 667231 + sent: 251946292 + received: 19222475454 + fails: 0 + unavail: 0 + health_checks: + checks: 26214 + fails: 0 + unhealthy: 0 + last_passed: true + downtime: 0 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + - id: 1 + server: 10.0.0.1:12348 + name: 10.0.0.1:12348 + backup: true + weight: 1 + state: unhealthy + active: 0 + max_conns: 50 + connections: 0 + sent: 0 + received: 0 + fails: 0 + unavail: 0 + health_checks: + checks: 26284 + fails: 26284 + unhealthy: 1 + last_passed: false + downtime: 262925617 + downstart: 2022-06-28T11:09:21.602Z + selected: 2022-06-28T15:01:25Z + zombies: 0 + zone: dns + NginxStreamUpstreamPeerMap: + title: Stream Upstream Servers + description: Array of stream upstream servers. + type: array + items: + $ref: '#/definitions/NginxStreamUpstreamPeer' + NginxStreamUpstreamPeer: + title: Stream Upstream Server + type: object + properties: + id: + type: integer + description: The ID of the server. 
+ readOnly: true + server: + type: string + description: An + address + of the server. + service: + type: string + description: The + service + parameter value of the + server + directive. + name: + type: string + format: hostname + description: The name of the server specified in the + server + directive. + readOnly: true + backup: + type: boolean + description: A boolean value indicating whether the server is a + backup + server. + weight: + type: integer + description: Weight + of the server. + state: + type: string + readOnly: true + enum: + - up + - down + - unavail + - checking + - unhealthy + description: Current state, which may be one of + “up”, “down”, “unavail”, + “checking”, or “unhealthy”. + active: + type: integer + description: The current number of connections. + readOnly: true + ssl: + type: object + readOnly: true + properties: + handshakes: + type: integer + description: The total number of successful SSL handshakes. + readOnly: true + handshakes_failed: + type: integer + description: The total number of failed SSL handshakes. + readOnly: true + session_reuses: + type: integer + description: The total number of session reuses during SSL handshake. + readOnly: true + no_common_protocol: + type: integer + description: The number of SSL handshakes failed + because of no common protocol. + handshake_timeout: + type: integer + description: The number of SSL handshakes failed + because of a timeout. + peer_rejected_cert: + type: integer + description: The number of failed SSL handshakes + when nginx presented the certificate to the upstream server + but it was rejected with a corresponding alert message. + verify_failures: + type: object + description: SSL certificate verification errors + properties: + expired_cert: + type: integer + description: An expired or not yet valid certificate + was presented by an upstream server. + revoked_cert: + type: integer + description: A revoked certificate was presented by an upstream server. 
+ hostname_mismatch: + type: integer + description: Server's certificate doesn't match the hostname. + other: + type: integer + description: Other SSL certificate verification errors. + max_conns: + type: integer + description: The + max_conns + limit for the server. + connections: + type: integer + description: The total number of client connections forwarded to this server. + readOnly: true + connect_time: + type: integer + description: The average time to connect to the upstream server. + readOnly: true + first_byte_time: + type: integer + description: The average time to receive the first byte of data. + readOnly: true + response_time: + type: integer + description: The average time to receive the last byte of data. + readOnly: true + sent: + type: integer + description: The total number of bytes sent to this server. + readOnly: true + received: + type: integer + description: The total number of bytes received from this server. + readOnly: true + fails: + type: integer + description: The total number of unsuccessful attempts + to communicate with the server. + readOnly: true + unavail: + type: integer + description: How many times the server became unavailable for client connections + (state “unavail”) due to the number of unsuccessful + attempts reaching the + max_fails + threshold. + readOnly: true + health_checks: + type: object + readOnly: true + properties: + checks: + type: integer + description: The total number of + health check + requests made. + readOnly: true + fails: + type: integer + description: The number of failed health checks. + readOnly: true + unhealthy: + type: integer + description: How many times the server became unhealthy + (state “unhealthy”). + readOnly: true + last_passed: + type: boolean + description: Boolean indicating whether the last health check request + was successful and passed + tests. 
+ readOnly: true + downtime: + type: integer + description: Total time the server was in the + “unavail”, “checking”, + and “unhealthy” states. + readOnly: true + downstart: + type: string + format: date-time + description: The time when the server became + “unavail”, “checking”, + or “unhealthy”, + in the ISO 8601 format with millisecond resolution. + readOnly: true + selected: + type: string + format: date-time + description: The time when the server was last selected + to process a connection, + in the ISO 8601 format with millisecond resolution. + readOnly: true + NginxStreamUpstreamConfServerMap: + title: Stream Upstream Servers + description: | + An array of stream upstream servers for dynamic configuration. + type: array + items: + $ref: '#/definitions/NginxStreamUpstreamConfServer' + example: + - id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + - id: 1 + server: 10.0.0.1:12349 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxStreamUpstreamConfServer: + title: Stream Upstream Server + description: | + Dynamically configurable parameters of a stream upstream + server: + type: object + properties: + id: + type: integer + description: The ID of the stream upstream server. + The ID is assigned automatically and cannot be changed. + readOnly: true + server: + type: string + description: Same as the + address + parameter of the stream upstream server. + When adding a server, it is possible to specify it as a domain name. + In this case, changes of the IP addresses + that correspond to a domain name will be monitored and automatically + applied to the upstream configuration + without the need of restarting nginx. + This requires the + resolver + directive in the “stream” block. + See also the + resolve + parameter of the stream upstream server. 
+ service: + type: string + description: Same as the + service + parameter of the stream upstream server. + This parameter cannot be changed. + readOnly: true + weight: + type: integer + description: Same as the + weight + parameter of the stream upstream server. + max_conns: + type: integer + description: Same as the + max_conns + parameter of the stream upstream server. + max_fails: + type: integer + description: Same as the + max_fails + parameter of the stream upstream server. + fail_timeout: + type: string + description: Same as the + fail_timeout + parameter of the stream upstream server. + slow_start: + type: string + description: Same as the + slow_start + parameter of the stream upstream server. + backup: + type: boolean + description: When true, adds a + backup + server. + This parameter cannot be changed. + readOnly: true + down: + type: boolean + description: Same as the + down + parameter of the stream upstream server. + parent: + type: string + description: Parent server ID of the resolved server. + The ID is assigned automatically and cannot be changed. + readOnly: true + host: + type: string + description: Hostname of the resolved server. + The hostname is assigned automatically and cannot be changed. + readOnly: true + example: + id: 0 + server: 10.0.0.1:12348 + weight: 1 + max_conns: 0 + max_fails: 1 + fail_timeout: 10s + slow_start: 0 + backup: false + down: false + NginxStreamKeyvalZonesMap: + title: Stream Keyval Shared Memory Zones + description: | + Contents of all stream keyval shared memory zones. + type: object + additionalProperties: + $ref: '#/definitions/NginxStreamKeyvalZone' + example: + keyval_zone: + key1: value1 + key2: value2 + key3: value3 + one: + arg1: value1 + arg2: value2 + arg3: value3 + NginxStreamKeyvalZone: + title: Stream Keyval Shared Memory Zone + description: | + Contents of a stream keyval shared memory zone when using the GET method. 
+ type: object + example: + key1: value1 + key2: value2 + key3: value3 + NginxStreamKeyvalZonePostPatch: + title: Stream Keyval Shared Memory Zone + description: | + Contents of a stream keyval shared memory zone + when using the POST or PATCH methods. + type: object + example: + key1: value1 + key2: value2 + key3: + value: value3 + expire: 30000 + NginxStreamZoneSync: + title: Stream Zone Sync Node + type: object + properties: + zones: + type: object + title: Zone Sync Zones + description: Synchronization information per each shared memory zone. + additionalProperties: + $ref: '#/definitions/NginxStreamZoneSyncZone' + status: + type: object + description: Synchronization information per node in a cluster. + properties: + bytes_in: + type: integer + description: The number of bytes received by this node. + msgs_in: + type: integer + description: The number of messages received by this node. + msgs_out: + type: integer + description: The number of messages sent by this node. + bytes_out: + type: integer + description: The number of bytes sent by this node. + nodes_online: + type: integer + description: The number of peers this node is connected to. + example: + zones: + zone1: + records_pending: 2061 + records_total: 260575 + zone2: + records_pending: 0 + records_total: 14749 + status: + bytes_in: 1364923761 + msgs_in: 337236 + msgs_out: 346717 + bytes_out: 1402765472 + nodes_online: 15 + NginxStreamZoneSyncZone: + title: Sync Zone + description: Synchronization status of a shared memory zone. + type: object + properties: + records_pending: + type: integer + description: The number of records that need to be sent to the cluster. + records_total: + type: integer + description: The total number of records stored in the shared memory zone. + NginxResolverZonesMap: + title: Resolver Zones + description: | + Status data for all + resolver zones. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxResolverZone' + example: + resolver_zone1: + requests: + name: 25460 + srv: 130 + addr: 2580 + responses: + noerror: 26499 + formerr: 0 + servfail: 3 + nxdomain: 0 + notimp: 0 + refused: 0 + timedout: 243 + unknown: 478 + resolver_zone2: + requests: + name: 325460 + srv: 1130 + addr: 12580 + responses: + noerror: 226499 + formerr: 0 + servfail: 283 + nxdomain: 0 + notimp: 0 + refused: 0 + timedout: 743 + unknown: 1478 + NginxResolverZone: + title: Resolver Zone + description: | + Statistics of DNS requests and responses per particular + resolver zone. + type: object + properties: + requests: + type: object + readOnly: true + properties: + name: + type: integer + description: The total number of requests + to resolve names to addresses. + readOnly: true + srv: + type: integer + description: The total number of requests + to resolve SRV records. + readOnly: true + addr: + type: integer + description: The total number of requests + to resolve addresses to names. + readOnly: true + responses: + type: object + readOnly: true + properties: + noerror: + type: integer + description: The total number of successful responses. + readOnly: true + formerr: + type: integer + description: The total number of + FORMERR (Format error) responses. + readOnly: true + servfail: + type: integer + description: The total number of + SERVFAIL (Server failure) responses. + readOnly: true + nxdomain: + type: integer + description: The total number of + NXDOMAIN (Host not found) responses. + readOnly: true + notimp: + type: integer + description: The total number of + NOTIMP (Unimplemented) responses. + readOnly: true + refused: + type: integer + description: The total number of + REFUSED (Operation refused) responses. + readOnly: true + timedout: + type: integer + description: The total number of timed out requests. 
+ readOnly: true + unknown: + type: integer + description: The total number of requests + completed with an unknown error. + readOnly: true + example: + resolver_zone1: + requests: + name: 25460 + srv: 130 + addr: 2580 + responses: + noerror: 26499 + formerr: 0 + servfail: 3 + nxdomain: 0 + notimp: 0 + refused: 0 + timedout: 243 + unknown: 478 + NginxWorker: + title: Worker process + description: | + Statistics per each worker process. + properties: + id: + type: integer + description: The ID of the worker process. + pid: + type: integer + description: The PID identifier of the worker process used by the operating system. + connections: + type: object + description: | + The number of accepted, dropped, active, and idle connections + per worker process. + properties: + accepted: + type: integer + description: | + The total number of client connections + accepted by the worker process. + dropped: + type: integer + description: | + The total number of client connections + dropped by the worker process. + active: + type: integer + description: | + The current number of active client connections + that are currently being handled by the worker process. + idle: + type: integer + description: | + The number of idle client connections + that are currently being handled by the worker process. + http: + type: object + properties: + requests: + type: object + description: The total number of client requests handled by the worker process. + properties: + total: + type: integer + description: The total number of client requests received by the worker process. + current: + type: integer + description: The current number of client requests that are currently being processed by the worker process. + example: + id: 0 + pid: 32212 + connections: + accepted: 1 + dropped: 0 + active: 1 + idle: 0 + http: + requests: + total: 15 + current: 1 + NginxWorkersMap: + title: Worker processes + description: nginx worker processes object. 
+ type: object + additionalProperties: + $ref: '#/definitions/NginxWorker' + example: + - id: 0 + pid: 32212 + connections: + accepted: 1 + dropped: 0 + active: 1 + idle: 0 + http: + requests: + total: 19 + current: 1 + - id: 1 + pid: 32214 + connections: + accepted: 1 + dropped: 0 + active: 1 + idle: 0 + http: + requests: + total: 15 + current: 0 + NginxError: + title: Error + description: | + nginx error object. + type: object + properties: + error: + type: object + properties: + status: + type: integer + description: HTTP error code. + text: + type: string + description: Error description. + code: + type: string + description: Internal nginx error code. + request_id: + type: string + description: The ID of the request, equals the value of the + $request_id + variable. + href: + type: string + description: Link to reference documentation. diff --git a/content/nginx/deployment-guides/_index.md b/content/nginx/deployment-guides/_index.md new file mode 100644 index 000000000..cc6779b5e --- /dev/null +++ b/content/nginx/deployment-guides/_index.md @@ -0,0 +1,11 @@ +--- +description: Deployment guides for deploying F5 NGINX Plus in cloud environments, global + server load balancing, configuring NGINX Plus to load balance or interoperate with + third‑party technologies, migrating from hardware ADCs to NGINX Plus, and enabling + single sign-on for proxied applications. +menu: + docs: + parent: NGINX Plus +title: Deployment Guides +weight: 200 +--- diff --git a/content/nginx/deployment-guides/amazon-web-services/_index.md b/content/nginx/deployment-guides/amazon-web-services/_index.md new file mode 100644 index 000000000..6731f02fb --- /dev/null +++ b/content/nginx/deployment-guides/amazon-web-services/_index.md @@ -0,0 +1,9 @@ +--- +description: Deployment guides for F5 NGINX Plus in the Amazon Web Services (AWS) cloud + environment. 
+menu: + docs: + parent: NGINX Plus +title: Amazon Web Services +weight: 100 +--- diff --git a/content/nginx/deployment-guides/amazon-web-services/ec2-instances-for-nginx.md b/content/nginx/deployment-guides/amazon-web-services/ec2-instances-for-nginx.md new file mode 100644 index 000000000..036dab9c6 --- /dev/null +++ b/content/nginx/deployment-guides/amazon-web-services/ec2-instances-for-nginx.md @@ -0,0 +1,225 @@ +--- +description: Create Amazon Elastic Compute Cloud (EC2) instances for running NGINX + Open Source and F5 NGINX Plus. +docs: DOCS-444 +doctypes: +- task +title: Creating Amazon EC2 Instances for NGINX Open Source and NGINX Plus +toc: true +weight: 600 +--- + +These instructions explain how to create instances in the Amazon Elastic Compute Cloud (EC2) environment suitable for running NGINX Open Source and F5 NGINX Plus. + +For NGINX Plus, a faster alternative is to purchase a prebuilt Amazon Machine Image (AMI) in the AWS Marketplace. Several operating systems are available, including Amazon Linux, Red Hat Enterprise Linux, and Ubuntu. For instructions, see [Installing NGINX Plus AMIs on Amazon EC2]({{< relref "../../admin-guide/installing-nginx/installing-nginx-plus-amazon-web-services.md" >}}). + + +## Prerequisites + +These instructions assume you have: + +- An [AWS account](http://docs.aws.amazon.com/AmazonSimpleDB/latest/DeveloperGuide/AboutAWSAccounts.html). +- If using the instructions in [Automating Installation with Ansible](#automate-ansible), basic Linux system administration skills, including installation of Linux software from vendor‑supplied packages, and file creation and editing. + +In addition, to install NGINX software by following the linked instructions, you need: + +- An NGINX Plus subscription, either paid or a [30‑day free trial](https://www.nginx.com/free-trial-request), if you plan to install that product. +- `root` privilege on the hosts where NGINX Open Source and NGINX Plus are to be installed. 
If appropriate for your environment, prefix commands with the `sudo` command. + + +## Creating an Amazon EC2 Instance + +1. Log into the [EC2 dashboard](https://console.aws.amazon.com/ec2/) in the AWS Management Console (****). + +2. In the left navigation bar, select **Instances**, then click the  Launch Instance  button. + + + +3. In the **Step 1: Choose an Amazon Machine Image (AMI)** window, click the  Select  button for the Linux distribution of your choice. + + + +4. In the **Step 2: Choose an Instance Type** window, click the radio button for the appropriate instance type. In the screenshot, we are selecting a t2.micro instance, which is normally selected by default and is sufficient for demo purposes. + + **Note:** At the time of publication of this guide, AWS gives you 750 hours of free usage per month with this instance type during the first year of your AWS account. Keep in mind, however, that if they run 24 hours a day, the sets of instances specified in the NGINX deployment guides use up the 750 hours in just a few days (just over 5 days for 6 instances, and just under 4 days for 8 instances). + + Click the  Next: Configure Instance Details  button to continue to the next step. + + + +5. In the **Step 3: Configure Instance Details** window, select the default subnet for your VPC in the **Subnet** field, then click the  Next: Add Storage  button. + + + +6. In the **Step 4: Add Storage** window, leave the defaults unchanged. Click the  Next: Add Tags  button. + + + +7. In the **Step 5: Add Tags** window, click the  Add Tag  button. Type Name in the **Key** field, and in the **Value** field type the instance name (the screenshot shows the result). This name is what will appear in the **Name** column of the summary table on the **Instances** tab of the EC2 dashboard (see the screenshot in Step 12, which shows one instance). 
+ + If you are following these instructions as directed by an NGINX deployment guide, the **Creating EC2 Instances and Installing the NGINX Software** section of the deployment guide specifies the instance names to use. + + Click the  Next: Configure Security Group  button to continue to the next step. + + + +8. In the **Step 6: Configure Security Group** window, select or enter the following values in the indicated fields: + + - **Assign a security group** – + - If you are setting up a deployment with multiple instances (one in an NGINX deployment guide, for instance), and this is the first instance you are creating, select Create a **new** security group. + - For subsequent instances, select Select an **existing** security group instead (it makes sense for all instances in a deployment to use the same security group). + - **Security group name** – Name of the group. If you are following these instructions as directed by an NGINX deployment guide, the **Prerequisites and Required AWS Configuration** section of the deployment guide specifies the group name to use. + - **Description** – Description of the group; the group name is often used. + + /nginx/images/aws-generic-instance-security-group.png + +9. In the table, modify the default rule for SSH connections, if necessary, by selecting or setting the following values. They allow inbound SSH connections from all sources (any IP address): + + - **Type** – SSH + - **Protocol** – TCP + - **Port Range** – 22 + - **Source** – Custom 0.0.0.0/0 + - **Description** – Accept SSH connections from all sources + +10. Create a rule that allows inbound HTTP connections from all sources, by clicking the  Add Rule  button and selecting or setting the following values in the new row: + + - **Type** – HTTP + - **Protocol** – TCP + - **Port Range** – 80 + - **Source** – Custom 0.0.0.0/0 + - **Description** – Accept unencrypted HTTP connections from all sources + + If appropriate, repeat this step to create a rule for HTTPS traffic. 
+ + When you've created all desired rules, click the  Review and Launch  button. + +11. In the **Step 7: Review Instance Launch** window, verify the settings are correct. If so, click the  Launch  button in the lower‑right corner of the window. To change settings, click the  Previous  button to go back to earlier windows. + + + +12. When you click the  Launch  button, a window pops up asking you to select an existing key pair or create a new key pair. Take the appropriate action for your use case, then click the  Launch Instances  button. + + **Note:** It's a best practice – and essential in a production environment – to create a separate key for each EC2 instance, so that if a key is compromised only the single associated instance becomes vulnerable. + + ![Screen of 'Select an existing key pair or create a new key pair' window during creation of Amazon EC2 instance](/nginx/images/aws-nlb-instance-key-pair.png) + + A **Launch Status** window pops up to confirm that your launch is underway. To confirm the details of your instance when the launch completes, click the  View Instances  button on that page. + + The instances you have created so far are listed on the **Instances** dashboard. The following screenshot shows a single instance. + + + +13. Finalize your security group rules. You need to do this only for the first instance in a given set, because all instances in a set can use the same security group. + + - In the left navigation bar, select **Security Groups**. + - Select the security group by clicking its radio button in the leftmost column of the table. A panel opens in the lower part of the window displaying details about the group. + - Open the **Inbound** tab and verify that the rules you created in Steps 9 and 10 are listed. + + + + - Open the **Outbound** tab and click the  Edit  button to create a rule for outbound traffic. 
 The set of rules depends on which ports you have used for traffic handled by the NGINX Plus instances: + + - If, for example, you have used port 80 both for client traffic and for health checks from a load balancer (for example, [AWS Network Load Balancer]({{< relref "high-availability-network-load-balancer.md" >}})), you need only one rule. + - If you have configured separate ports for different purposes, or ports other than 80 (such as 443 for HTTPS), make the appropriate adjustments. + + In the **Destination** field, type the security group's ID, which appears in the **Group ID** column in the upper table (here it's sg-3bdbf55d). + + + +14. To install NGINX software on the instance, [connect](#connect-to-instance) to it, and follow the instructions in the NGINX Plus Admin Guide for [NGINX Open Source]({{< relref "/nginx/admin-guide/installing-nginx/installing-nginx-open-source#prebuilt" >}}) and [NGINX Plus]({{< relref "/nginx/admin-guide/installing-nginx/installing-nginx-plus.md" >}}). + + +## Connecting to an EC2 Instance +To install and configure NGINX Open Source or NGINX Plus on an instance, you need to open a terminal window and connect to the instance over SSH. + +1. Navigate to the **Instances** tab on the EC2 Dashboard if you are not there already. + +2. Click the row for an instance to select it. In the screenshot **instance2** is selected. + + + +3. Click the  Connect  button above the list of instances. The **Connect To Your Instance** window pops up. +4. Follow the instructions in the pop‑up window, which are customized for the selected instance (here **instance2**) to provide the name of the key file in the steps and in the sample `ssh` command. + + ![Screenshot of 'Connect To Your Instance' pop-up window for Amazon EC2 instance](/nginx/images/aws-nlb-instance-connect.png) + + +## Installing NGINX Software + +Once you have established a connection with an instance, you can install the NGINX software on it. 
Follow the instructions in the NGINX Plus Admin Guide for NGINX Open Source and [NGINX Plus]({{< relref "../../admin-guide/installing-nginx/installing-nginx-plus.md" >}}). The [Admin Guide]({{< relref "/nginx/admin-guide/_index.md" >}}) also provides instructions for many maintenance tasks. + + +### Automating Installation with a Configuration Manager + +You can automate the installation of NGINX Open Source and NGINX Plus. Instructions for Ansible are provided below. For Chef and Puppet, see these articles on the NGINX, Inc. blog: + +- [Installing NGINX and NGINX Plus with Chef](https://www.nginx.com/blog/installing-nginx-nginx-plus-chef/) +- [Deploying NGINX Plus for High Availability with Chef](https://www.nginx.com/blog/nginx-plus-high-availability-chef/) +- [Installing NGINX and NGINX Plus with Puppet](https://www.nginx.com/blog/installing-nginx-nginx-plus-puppet/) + + +#### Automating Installation with Ansible + +NGINX, Inc. publishes a unified Ansible role for NGINX Open Source and NGINX Plus on [Ansible Galaxy](https://galaxy.ansible.com/nginxinc/nginx/) and [GitHub](https://github.com/nginxinc/ansible-role-nginx). Perform these steps to install and run it. + +1. [Connect to the EC2 instance](#connect-instance). + +2. Install Ansible. These commands are appropriate for Debian and Ubuntu systems: + + ```shell + apt update + apt install python-pip -y + pip install ansible + ``` + +3. Install the official Ansible role from NGINX, Inc.: + + ```shell + ansible-galaxy install nginxinc.nginx + ``` + +4. (NGINX Plus only) Copy the nginx-repo.key and nginx-repo.crt files provided by NGINX, Inc. to ~/.ssh/ngx-certs/. + +5. Create a file called **playbook.yml** with the following contents: + + ```none + --- + - hosts: localhost + become: true + roles: + - role: nginxinc.nginx + ``` + +6. 
Run the playbook: + + ```shell + ansible-playbook playbook.yml + ``` + + +## Optional: Creating an NGINX Open Source AMI + +To streamline the process of installing NGINX Open Source on multiple instances, you can create an AMI from an existing NGINX Open Source instance, and spin up additional instances of the AMI when needed. + +1. Follow the instructions in [Creating Amazon EC2 Instances](#create-ec2-instances) and the NGINX Plus Admin Guide to create an instance and install NGINX Open Source on it, if you have not already. + +2. Navigate to the **Instances** tab on the Amazon EC2 Dashboard. + +3. Select the base instance by clicking its row in the table. In the screenshot, **instance2** is selected. + + + +4. Click the  Actions  button and select Image > Create Image. + + + +5. In the window that pops up, fill in the **Image name** and (optionally) **Image description** fields, then click the  Create image   button. + + screenshot of 'Create Image' pop-up window for creating base AMI in Amazon EC2 + + A **Create Image** window pops up to confirm that the image‑creation request was received. To verify that the image was created, navigate to the **AMIs** tab. + +### Revision History + +- Version 2 (July 2018) – Substitute links to NGINX Plus Admin Guide for sample installation instructions. +- Version 1 (April 2018) – Initial version (NGINX Plus Release 14) diff --git a/content/nginx/deployment-guides/amazon-web-services/high-availability-keepalived.md b/content/nginx/deployment-guides/amazon-web-services/high-availability-keepalived.md new file mode 100644 index 000000000..606b25214 --- /dev/null +++ b/content/nginx/deployment-guides/amazon-web-services/high-availability-keepalived.md @@ -0,0 +1,244 @@ +--- +description: Create a highly available active-passive deployment of F5 NGINX Plus on + AWS with a solution combining keepalived and the AWS Elastic IP address feature. 
+docs: DOCS-445 +doctypes: +- task +title: Active-Passive HA for NGINX Plus on AWS Using Elastic IP Addresses +toc: true +weight: 200 +--- + +This guide explains how to create a highly available (HA) active‑passive deployment of F5 NGINX Plus in the [Amazon Web Services](https://aws.amazon.com/) (AWS) cloud. It combines the `keepalived`‑based solution for high availability (provided by NGINX for on‑premises HA deployments) with the AWS Elastic IP address feature. + +NGINX also provides a [solution for active‑active HA of NGINX Plus in AWS]({{< relref "high-availability-network-load-balancer.md" >}}), using AWS Network Load Balancer. + + +## Overview + +The [supported solution for HA deployment]({{< relref "../../admin-guide/high-availability/ha-keepalived.md" >}}) of NGINX Plus that uses `keepalived` is designed for on‑premises deployments. It is typically not viable in cloud environments, such as AWS, because of the networking restrictions they impose. + +One method for deploying NGINX Plus in a highly available manner on AWS is to use ELB in front of NGINX Plus instances. However, the method has several disadvantages: + +- It increases the cost of your deployment. +- It limits the number of protocols NGINX Plus and your applications can support. In particular, ELB does not support UDP load balancing. +- It does not provide a single static IP address for NGINX Plus instances, which is a crucial requirement for some applications. + +This guide explains how to create an active‑passive HA deployment of NGINX Plus on AWS that doesn’t require ELB and thus isn't subject to its disadvantages. It combines the `keepalived`‑based solution with AWS’s [Elastic IP address](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) feature. Most importantly, this method addresses the requirement for a single IP address: as long as the primary NGINX Plus instance is operating correctly, it has the Elastic IP address. 
If the primary fails, the backup instance becomes the primary and reassociates the Elastic IP address with itself, as shown in the figure. + +When two NGINX Plus nodes hosted in AWS share an elastic IP address, the address switches to the backup automatically when the primary goes down, preserving high availability + +As an alternative to ELB, you can use Route 53 to distribute traffic among NGINX Plus instances, relying only on DNS load balancing. However, clients as well as intermediate DNS servers often cache DNS records as specified by the TTL value in the record, so there can be a delay in propagation of the updated records to the clients. This can lead to increased downtime of your applications as observed by clients. Such an update can happen when Route 53 detects the failure of an NGINX Plus instance and removes the corresponding record. In contrast, when you use the HA solution along with Route 53, the record usually doesn’t change because the IP address stays the same, and there is no TTL‑related problem. + +**Notes:** + +- We have successfully tested the instructions on Ubuntu 16.04 LTS (Xenial Xerus) and CentOS 7, with `keepalived` installed from the respective OS vendor repositories. +- Except as noted, perform all steps on both the primary and backup instance. +- The solution is not covered by your NGINX Plus support contract. +- In addition to the [active‑active HA solution]({{< relref "high-availability-network-load-balancer.md" >}}) mentioned above, NGINX offers a solution based on [AWS Lambda](https://aws.amazon.com/lambda/) which does not require installation of any additional software on the NGINX Plus instances. The [NGINX Professional Services](https://www.nginx.com/services/) team can deploy and configure the Lambda‑based solution for you and provide support. 
+ + +## Step 1 – Launch Two NGINX Plus Instances + +The scripts in the HA solution use the AWS API to associate an Elastic IP address with an NGINX Plus instance, and so must have credentials for accessing the API. AWS uses _IAM roles_ to handle credentials, so you need to create a role and attach it to each NGINX Plus instance. Perform these steps (for step‑by‑step instructions, see the [AWS documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)): + +1. Create an IAM role and attach the following custom policy to it. The instance to which the policy applies can manipulate the Elastic IP address (adopt or release it) as well as perform the two indicated `Describe` actions. + + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AssociateAddress", + "ec2:DescribeInstances", + "ec2:DescribeAddresses", + "ec2:DisassociateAddress" + ], + "Resource": "*" + } + ] + } + ``` + + Alternatively, you can use the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables to provide credentials to the HA scripts, as shown in [Step 5](#ha-aws_keepalived-configure). + +2. Launch two instances and [install NGINX Plus]({{< relref "../../admin-guide/installing-nginx/installing-nginx-plus-amazon-web-services.md" >}}) on each. (As noted, we tested the instructions on Ubuntu 16.04 LTS and CentOS 7.) + +3. Attach this IAM role to the instance. + + +## Step 2 – Allocate an Elastic IP Address + +Allocate an Elastic IP address and remember its ID. For detailed instructions, see the [AWS documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html#using-instance-addressing-eips-allocating). + + +## Step 3 – Install `keepalived`, `wget`, and the AWS CLI + +1. Install two packages from your OS vendor’s repository: the **keepalived** package and **wget**, which is used by the HA scripts. 
+ + - On Ubuntu systems: + + ```shell + sudo apt-get install keepalived wget + ``` + + - On CentOS systems: + + ```shell + sudo yum install keepalived wget + ``` + +2. Follow the instructions in the [AWS documentation](https://docs.aws.amazon.com/cli/latest/userguide/installing.html) to install the AWS CLI. + + +## Step 4 – Download the HA Scripts + +The NGINX Plus HA solution uses two scripts, which are invoked by `keepalived`: + +- nginx-ha-check – Determines the health of NGINX Plus. +- nginx-ha-notify – Moves the Elastic IP address when a state transition happens, for example when the backup instance becomes the primary. + +1. Create a directory for the scripts, if it doesn’t already exist. + + - On Ubuntu systems: + + ```shell + sudo mkdir -p /usr/lib/keepalived + ``` + + - On CentOS systems: + + ```shell + sudo mkdir -p /usr/libexec/keepalived + ``` + +2. Download the scripts from our [GitHub repository](https://github.com/nginxinc/aws-ha-elastic-ip) into the created directory. + + +## Step 5 – Configure `keepalived` and the HA Scripts + +There are two configuration files for the HA solution: + +- **keepalived.conf** – The main configuration file for `keepalived`, slightly different for each NGINX Plus instance. +- nginx-ha-notify – The script you downloaded in [Step 4](#ha-aws_ha-scripts), with several user‑defined variables. + + +### Creating keepalived.conf + +In the **/etc/keepalived** folder create a file named **keepalived.conf** with the following content. 
+ +```nginx +vrrp_script chk_nginx_service { + script "<path-to-nginx-ha-check-script>" + interval 3 + weight 50 +} +vrrp_instance VI_1 { + interface eth0 + priority <priority> + virtual_router_id 51 + advert_int 1 + unicast_src_ip <internal-IP-address-of-this-instance> + unicast_peer { + <internal-IP-address-of-the-other-instance> + } + authentication { + auth_type PASS + auth_pass <password> + } + track_script { + chk_nginx_service + } + notify "<path-to-nginx-ha-notify-script>" +} +``` + +You must change values for the following configuration keywords (as you do so, also remove the angle brackets enclosing the placeholder value): + +- `script` in the `chk_nginx_service` block – The script that sends health checks to NGINX Plus. + + - On Ubuntu systems, /usr/lib/keepalived/nginx-ha-check + - On CentOS systems, /usr/libexec/keepalived/nginx-ha-check + +- `priority` – The value that controls which instance becomes primary, with a higher value meaning a higher priority. Use `101` for the primary instance and `100` for the backup. + +- `unicast_src_ip` – This instance's IP address. + +- `unicast_peer` – The other instance's IP address. + +- `auth_pass` – The password string used for authentication between peers. + +- `notify` – The script that is invoked during a state transition. + + - On Ubuntu systems, /usr/lib/keepalived/nginx-ha-notify + - On CentOS systems, /usr/libexec/keepalived/nginx-ha-notify + + +### Creating nginx-ha-notify + +Modify the user‑defined variables section of the nginx-ha-notify script, replacing each `<value>` placeholder with the value specified in the list below: + +```none +export AWS_ACCESS_KEY_ID=<access-key-ID> +export AWS_SECRET_ACCESS_KEY=<secret-access-key> +export AWS_DEFAULT_REGION=<AWS-region> +HA_NODE_1=<internal-DNS-name-of-first-instance> +HA_NODE_2=<internal-DNS-name-of-second-instance> +ALLOCATION_ID=<Elastic-IP-allocation-ID> +``` + +- `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` – The credentials for accessing the AWS API. Set them only when an IAM instance profile isn’t used. Otherwise, delete the corresponding two lines. +- `AWS_DEFAULT_REGION` – The AWS region of your deployment. +- `HA_NODE_1` and `HA_NODE_2` – The internal or private DNS names of the two NGINX Plus instances. 
+- `ALLOCATION_ID` – The ID of the allocated Elastic IP address. + + +## Testing + +Run this command on both instances to start the `keepalived` daemon: + +```shell +sudo service keepalived start +``` + +The instance with the higher priority becomes the primary. As a result, the Elastic IP address becomes associated with the primary instance, as confirmed on the AWS Console. + +To check the instance state, run: + +```shell +cat /var/run/nginx-ha-keepalived.state +``` + +The command outputs `STATE=MASTER` on the primary instance and `STATE=BACKUP` otherwise. + +You can simulate the failure of the primary by stopping the `keepalived` daemon: + +```shell +sudo service keepalived stop +``` + +Check the state on the backup instance, confirming that it has transitioned to `MASTER`. Additionally, in the AWS Console the Elastic IP address is now associated with the new primary instance. + + +## Troubleshooting + +If the solution doesn’t work as expected, check the `keepalived` logs, which are written to /var/log/syslog. Also, you can manually run the commands that invoke the `awscli` utility in the nginx-ha-notify script to check that the utility is working properly. + + +## Caveats + +- In most of our tests it took 5 to 6 seconds for the Elastic IP address to be reassigned. +- Elastic IP address reassignment is not free; see [Amazon EC2 Pricing](https://aws.amazon.com/ec2/pricing/). +- Because the solution relies on the AWS APIs to reassociate the Elastic IP address, in some rare scenarios – such as flip‑flopping (the instances change state rapidly) or split‑brain (the instances lose connectivity with each other) – it is possible for the Elastic IP address not to end up associated with the primary. We were not able to reproduce these scenarios in our testing, however. If they occur, restart `keepalived` on both instances. + + +## Resources + +[Download the HA solution from the NGINX GitHub repository](https://github.com/nginxinc/aws-ha-elastic-ip). 
+ +### Revision History + +- Version 1 (May 2017) – Initial version (NGINX Plus Release 12) + diff --git a/content/nginx/deployment-guides/amazon-web-services/high-availability-network-load-balancer.md b/content/nginx/deployment-guides/amazon-web-services/high-availability-network-load-balancer.md new file mode 100644 index 000000000..dc2d6cbd0 --- /dev/null +++ b/content/nginx/deployment-guides/amazon-web-services/high-availability-network-load-balancer.md @@ -0,0 +1,342 @@ +--- +description: Create a highly available active-active deployment of F5 NGINX Plus on AWS + in combination with AWS Network Load Balancer (NLB). +docs: DOCS-446 +doctypes: +- task +title: Active-Active HA for NGINX Plus on AWS Using AWS Network Load Balancer +toc: true +weight: 100 +--- + +This guide explains how to create our recommended solution for a highly available, active‑active deployment of NGINX Plus in the Amazon Web Services (AWS) cloud. The solution combines the AWS Network Load Balancer (NLB) for fast and efficient handling of Layer 4 traffic with F5 NGINX Plus for advanced, Layer 7 features such as load balancing, caching, and content‑based routing. The combined solution is fast, powerful, reliable, and likely to be relatively low‑cost. + +This guide explains how to set up an AWS NLB in front of one pair of NGINX Plus load balancers. (You can increase resiliency as needed by following the same steps for additional NGINX Plus instances.) + +The [Appendix](#appendix) provides instructions for creating EC2 instances with the names used in this guide, and installing and configuring the NGINX software on them. + + +## About AWS NLB + +AWS NLB is optimized for fast, efficient load balancing at the connection level (Layer 4). AWS NLB uses a [flow hash routing algorithm](https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/how-elastic-load-balancing-works.html#routing-algorithm). 
+ +AWS NLB is ideal for fast load balancing of TCP traffic, as it's able to handle millions of requests per second while maintaining ultra‑low latencies. This enables AWS NLB to more easily handle volatile traffic patterns – patterns with sudden and dramatic changes in the amount of traffic. + +Unlike previous AWS solutions, AWS NLB supports both static IP addresses and [Elastic IP addresses](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html). + + +## About NGINX Plus + +NGINX Plus is complementary to NLB. Operating at Layer 7 (the application layer), it uses more advanced load‑balancing criteria, including schemes that rely on the content of requests and the results of NGINX Plus' [active health checks]({{< relref "../../admin-guide/load-balancer/http-health-check.md" >}}). + +[NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) is the commercially supported version of the [NGINX Open Source](http://nginx.org/en) software. NGINX Plus is a complete application delivery platform, extending the power of NGINX with a host of enterprise‑ready capabilities that enhance an AWS web application deployment and are instrumental to building web applications at scale. 
+ +NGINX Plus provides both reverse‑proxy features and load‑balancing features, including: + +- [Full‑featured HTTP, TCP, and UDP load balancing](https://www.nginx.com/products/nginx/load-balancing/) +- [Intelligent session persistence](https://www.nginx.com/products/nginx/load-balancing/#session-persistence) +- [High‑performance reverse proxy]({{< relref "../../admin-guide/web-server/reverse-proxy.md" >}}) +- [Caching and offload of dynamic and static content]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}) +- [Adaptive streaming to deliver audio and video to any device](https://www.nginx.com/products/nginx/streaming-media/) +- [Application-aware health checks](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/) and [high availability](https://docs.nginx.com/nginx/admin-guide/high-availability/) +- [Advanced activity monitoring available via a dashboard or API](https://www.nginx.com/products/nginx/live-activity-monitoring/) +- [Management and real‑time configuration changes with DevOps‑friendly tools](https://www.nginx.com/products/nginx/load-balancing/#load-balancing-api) + + +## Solution Overview + +The setup in this guide combines AWS NLB, AWS target groups, Amazon Elastic Compute Cloud (EC2) instances running NGINX Plus, and EC2 instances running NGINX Open Source, which together provide a highly available, all‑active NGINX and NGINX Plus solution. + + + +AWS NLB handles Layer 4 TCP connections and balances traffic using a flow hash routing algorithm. By default, an AWS NLB has a DNS name to which an IP address is assigned dynamically, but you can optionally attach an Elastic IP address to the AWS NLB to ensure that it will always be reachable at the same IP address. + +The AWS NLB listens for incoming connections as defined by its listeners. Each listener forwards a new connection to one of the available instances in a target group, chosen using the flow hash routing algorithm. 
+ +In this guide, the target group consists of two NGINX Plus load balancer instances. However, you can register an unlimited number of instances in the target group, or use an [AWS Auto Scaling group](https://aws.amazon.com/autoscaling/) to dynamically adjust the number of NGINX Plus instances. + + +## Prerequisites + +These instructions assume you have the following: + +- [An AWS account](http://docs.aws.amazon.com/AmazonSimpleDB/latest/DeveloperGuide/AboutAWSAccounts.html). +- Six EC2 instances, four running NGINX Open Source and two running NGINX Plus. You need a subscription for each NGINX Plus instance, either paid or a [30‑day free trial](https://www.nginx.com/free-trial-request). + + The [Appendix](#appendix) provides instructions for creating instances with the expected names, and installing and configuring the NGINX software. + +- Familiarity with NGINX and NGINX Plus configuration syntax. Complete configuration snippets are provided, but not analyzed in detail. + + + +## Configuring an AWS Network Load Balancer + +With NGINX Open Source and NGINX Plus installed and configured on the EC2 instances (see the [Appendix](#appendix)), we’re ready to configure an AWS NLB for a highly available, all‑active NGINX Plus setup. + +- [Allocating an Elastic IP Address](#nlb-eip) +- [Creating an AWS NLB](#nlb-create) +- [Configuring the AWS NLB Routing Options](#nlb-routing-options) +- [Registering Instances in the Target Group](#nlb-register-instances) +- [Launching the AWS NLB](#nlb-launch) + + +### Allocating an Elastic IP Address + +The first step is to allocate an Elastic IP address, which becomes the fixed IP address for your AWS NLB. (While using an Elastic IP address is optional, we strongly recommend that you do so. With a dynamic IP address, the AWS NLB might not remain reachable if you reconfigure or restart it.) + +1. Log in to the [AWS Management Console](https://console.aws.amazon.com/ec2/) for EC2 (****). + +2. 
In the left navigation bar, select **Elastic IPs**, then click either of the  Allocate new address  buttons. + + + +3. In the **Allocate new address** window that opens, click the  Allocate  button. + + + +4. When the message appears indicating that the request for an Elastic IP address succeeded, click the  Close  button. + + + +The new Elastic IP address appears on the **Elastic IPs** dashboard. + + + + +### Creating the AWS NLB + +1. In the left navigation bar, select **Load Balancers**, then click the  Create Load Balancer  button. + + + +2. In the **Select load balancer type** window that opens, click the  Create  button in the  Network Load Balancer  panel (the center one). + + + +3. In the **Step 1: Configure Load Balancer** window that opens, enter the following values: + - In the **Basic Configuration** section: + - **Name** – Name of your AWS NLB (aws-nlb-lb in this guide). + - **Scheme** – internet-facing. + - In the **Listeners** section: + - **Load Balancer Protocol** – TCP (the only available option). + - **Load Balancer Port** – Port on which your AWS NLB listens for incoming connections. In this guide, and for most web applications, it is port 80. + - In the **Availability Zones** section, the zones that host the EC2 instances to which your AWS NLB routes traffic. Click the radio button in the leftmost column of the appropriate row: + - If you set up your instances with the instructions in [Creating Amazon EC2 Instances for NGINX Open Source and NGINX Plus]({{< relref "ec2-instances-for-nginx.md" >}}), select the default subnet within the default [Amazon Virtual Private Cloud](https://aws.amazon.com/vpc/) (VPC) to target a single availability zone. + - If you set up your instances using our scripts for [Packer and Terraform](#create-instances-automated), use the aws-nlb-subnet within the aws-nlb-vpc VPC to target a single availability zone. + + + +4. 
When you select an availability zone in the table, a drop‑down menu appears in the **Elastic IP** column. Select the address you allocated in [Allocating an Elastic IP Address](#nlb-eip). + +5. Click the  Next: Configure Routing  button in the lower‑right corner of the window. + + +### Configuring the AWS NLB Routing Options + +In the **Step 2: Configure Routing** window that opens, you create a _target group_, which contains the set of EC2 instances across which your AWS NLB load balances traffic (you'll specify those instances in [Registering Instances in the Target Group](#nlb-register-instances)). + +1. In the **Target group** section, select or enter the following values: + + - **Target group** – New target group + - **Name** – Name of the target group (for this guide, aws-nlb-tg) + - **Protocol** – TCP (the only available option) + - **Port** – The port you specified for the **Load Balancer Port** field in Step 3 of the [previous section](#nlb-create) (80 in this guide) + - **Target type** – instance + +2. In the **Health checks** section, open the **Advanced health check settings** subsection and enter the following values: + + - **Protocol** – Protocol the AWS NLB uses when sending health checks. This guide uses TCP, which means the AWS NLB makes a health check by attempting to open a TCP connection on the port specified in the next field. + - **Port** – Port on the target instances to which the AWS NLB sends health checks. In this guide, we're selecting traffic port to send health checks to the same port as regular traffic. + - **Healthy threshold** – Number of consecutive health checks an unhealthy instance must pass to be considered healthy. + - **Unhealthy threshold** – Number of consecutive health checks a healthy instance must fail to be considered unhealthy. + - **Timeout** – Number of seconds the AWS NLB waits for a response to the health check before considering the instance unhealthy. + - **Interval** – Number of seconds between health checks. 
+ + + + If you want to use HTTP‑based health checks, select HTTP or HTTPS in the **Protocol** field instead of TCP. Two additional fields open (not shown in the screenshot): + + - **Path** – The path to which the AWS NLB sends a `GET` request as the health check. + + - **Success codes** – Range of HTTP response codes the AWS NLB accepts as indicating a successful health check. + +3. Click the  Next: Register Targets  button in the lower‑right corner of the window. + + +### Registering Instances in the Target Group + +In the **Step 3: Register Targets** window that opens, you add instances to the empty target group you created in the previous section. For this guide, we add both of our NGINX Plus load balancer instances. + +1. In the **Instances** table, click the radio button in the left‑most column for the two NGINX Plus load balancer instances, ngx-plus-1 and ngx-plus-2. + + + +2. Click the  Add to registered  button above the table. The instances are added to the **Registered targets** table. + + + +3. Click the  Next: Review  button in the lower‑right corner of the window. + + +### Launching the AWS NLB + +In the **Step 4: Review** window that opens: + +1. Verify that the settings are correct. If so, click the  Create  button in the lower‑right corner of the window. To change settings, click the  Previous  button to go back to previous screens. + + + +2. The AWS NLB is provisioned. When the success message appears, click the  Close  button to return to the **Load Balancers** dashboard. + + + +3. The **Load Balancers** dashboard opens. As noted in the previous **Load Balancer Creation Status** window, it can take a few minutes to provision the AWS NLB. When the value in the **State** column of the table changes to active, click the radio button in the left‑most column to display details about the AWS NLB. + + + +4. 
To verify that the AWS NLB is working correctly, open a new browser window and navigate to the AWS NLB's public DNS name, which appears in the **DNS name** field in the **Basic Configuration** section of the dashboard. [If you copy and paste the DNS name, be sure not to include the parenthesized words at the end, (A Record).] + + The default **Welcome to nginx!** page indicates that the AWS NLB has successfully forwarded a request to one of the two NGINX Plus instances. + + + +5. To verify that the NGINX Plus load balancer is working correctly, add /backend-one and then /backend-two to the public DNS name. The pages indicate that you have reached NGINX instances serving the two backend applications, App 1 and App 2. + + + + + + +## Appendix + +This Appendix provides links to instructions for creating EC2 instances with the names used in this guide, and then installing and configuring NGINX Open Source and NGINX Plus on them: + +- [Creating EC2 Instances and Installing the NGINX Software](#create-instance-install-nginx) +- [Configuring NGINX Open Source on the Web Servers](#configure-web-servers) +- [Configuring NGINX Plus on the Load Balancers](#configure-load-balancers) + +As an alternative to creating and configuring instances individually, you can use our Packer and Terraform scripts to completely automate the process: + +- [Automating Instance Setup with Packer and Terraform](#create-instances-automated) + +After completing the instructions, you have completed the prerequisites for this guide and can continue to [Configuring an AWS Network Load Balancer](#nlb-configure). + + +### Creating EC2 Instances and Installing the NGINX Software + +The deployment in this guide uses six EC2 instances: two instances running NGINX Plus that load balance traffic to four instances running NGINX Open Source as a web server. The four NGINX Open Source instances are deployed in two pairs, each pair running a different app. 
+ +Step‑by‑step instructions for creating EC2 instances and installing NGINX Open Source and NGINX Plus are provided in our deployment guide, [Creating Amazon EC2 Instances for NGINX Open Source and NGINX Plus]({{< relref "ec2-instances-for-nginx.md" >}}). + +**Note:** When installing NGINX Open Source or NGINX Plus, you connect to each instance over SSH. To save time, leave the SSH connection to each instance open after installing the software, for reuse when you configure it with the instructions in the sections below. + +Assign the following names to the instances, and then install the indicated NGINX software. The screenshot below shows the resulting **Instances** table. + +- Four NGINX Open Source instances: + - App 1: + - ngx-oss-app1-1 + - ngx-oss-app1-2 + - App 2: + - ngx-oss-app2-1 + - ngx-oss-app2-2 + +- Two NGINX Plus instances: + - ngx-plus-1 + - ngx-plus-2 + + + + +### Configuring NGINX Open Source on the Web Servers + +For the purposes of this guide, you configure the NGINX Open Source instances as web servers that return a page specifying the server name, address, and other information. As an example, here's the page returned by App 1: + + + +Step‑by‑step instructions are provided in our deployment guide, Setting Up an NGINX Demo Environment. + +Repeat the instructions on all four web servers: + +- Running App 1: + - ngx-oss-app1-1 + - ngx-oss-app1-2 +- Running App 2: + - ngx-oss-app2-1 + - ngx-oss-app2-2 + + +### Configuring NGINX Plus on the Load Balancers + +For the purposes of this guide, you configure the NGINX Plus instances as load balancers to distribute requests to the NGINX Open Source web servers set up in [Configuring NGINX Open Source on the Web Servers](#configure-web-servers). + +Step‑by‑step instructions are provided in our deployment guide, Setting Up an NGINX Demo Environment. + +Repeat the instructions on both ngx-plus-1 and ngx-plus-2. 
+ + +## Automating Instance Setup with Packer and Terraform + +As an alternative to individual creation and configuration of the six instances used in this guide, you can use the Packer and Terraform scripts from our [GitHub repository](https://github.com/nginxinc/NGINX-Demos/tree/master/aws-nlb-ha-asg). They generate the complete setup for this guide, with two load balancer instances running NGINX Plus and four web server instances running NGINX Open Source. + +After executing the scripts, you can jump directly into the [instructions for creating an AWS NLB](#nlb-configure) without any further setup. Additionally, the scripts create a new set of networking rules and security group settings to avoid conflicts with any pre‑existing network settings. + +**Note:** Instead of using the default VPC like the [instructions in our Deployment Guide]({{< relref "ec2-instances-for-nginx.md" >}}), the scripts create a new VPC. + +To run the scripts, follow these instructions: + +1. Install [Packer](https://www.packer.io/intro/getting-started/install.html) and [Terraform](https://learn.hashicorp.com/terraform). + +2. Clone or download the scripts from our [GitHub repository](https://github.com/nginxinc/NGINX-Demos/tree/master/aws-nlb-ha-asg): + + - The scripts in packer/ngx-oss are for creating an Ubuntu AMI running open source NGINX. + - The scripts in packer/ngx-plus are for creating an AWS Linux AMI running NGINX Plus. + - The scripts in **terraform** are for launching and configuring the two NGINX Plus load balancer instances and the four NGINX Open Source web server instances. + +3. 
Set your AWS credentials in the Packer and Terraform scripts: + + - For Packer, set your credentials in the `variables` block in both packer/ngx-oss/packer.json and packer/ngx-plus/packer.json: + + ```none + "variables": { + "home": "{{env `HOME`}}", + "aws_access_key": "", + "aws_secret_key": "" + } + ``` + + - For Terraform, set your credentials in **terraform/provider.tf**: + + ```none + provider "aws" { + region = "us-west-1" + access_key = "" + secret_key = "" + } + ``` + +4. Copy your NGINX Plus certificate and key to **~/.ssh/certs**. + +5. Run the `setup.sh` script: + + ```shell + chmod +x setup.sh + ./setup.sh + ``` + +The script launches two NGINX Plus load balancer instances and four NGINX web server instances and configures the appropriate settings on each instance to run the guide. + +If you want to delete the infrastructure created by Terraform, run the `cleanup.sh` script. + +```shell +chmod +x cleanup.sh +./cleanup.sh +``` + +### Revision History + +- Version 5 (March 2020) – Fix link missed in Version 4 +- Version 4 (November 2019) – Change link to GitHub repo for automated setup +- Version 3 (April 2019) – Modularization of Appendix +- Version 2 (April 2018) – Revisions to Appendix +- Version 1 (November 2017) – Initial version (NGINX Plus Release 13) + diff --git a/content/nginx/deployment-guides/amazon-web-services/ingress-controller-elastic-kubernetes-services.md b/content/nginx/deployment-guides/amazon-web-services/ingress-controller-elastic-kubernetes-services.md new file mode 100644 index 000000000..52bbab9eb --- /dev/null +++ b/content/nginx/deployment-guides/amazon-web-services/ingress-controller-elastic-kubernetes-services.md @@ -0,0 +1,199 @@ +--- +description: Use NGINX or F5 NGINX Plus as the Ingress Controller for Amazon Elastic + Kubernetes Services. 
+docs: DOCS-447 +doctypes: +- task +title: Using NGINX or NGINX Plus as the Ingress Controller for Amazon Elastic Kubernetes + Services +toc: true +weight: 400 +--- + +This guide explains how to use the NGINX Open Source or F5 NGINX Plus Ingress Controller for Kubernetes as the Ingress controller for a Kubernetes cluster hosted in the Amazon Elastic Kubernetes Service (EKS). + +**Note:** The instructions in this document apply to both the NGINX and NGINX Plus Ingress Controllers for Kubernetes. For ease of reading, the document refers to NGINX Plus only. + + + +## Prerequisites + +- [An AWS account](https://docs.aws.amazon.com/AmazonSimpleDB/latest/DeveloperGuide/AboutAWSAccounts.html). +- A prebuilt image of the NGINX or NGINX Plus Ingress Controller for Kubernetes. For NGINX Open Source, NGINX provides a prebuilt image on DockerHub, or you can build your own with our instructions. For NGINX Plus, you must build an image. + + **Note:** If you build the image, do not push it to a public registry. Run the following `make` `container` command. Include the `PREFIX` argument to specify the repo in your private registry where the container is created; this also sets the name that you can later use to reference the image, instead of its numerical ID. In this example we set it to nginx/nginx-plus-ingress. + + +```shell +make container DOCKERFILE=DockerfileForPlus PREFIX=nginx/nginx-plus-ingress +``` + + +## Creating an Amazon EKS Cluster +In this guide we’re using the `eksctl` command to create an Amazon EKS cluster (you can also use the AWS Management Console or AWS CLI). + +1. If the `eksctl` command is not already installed, or to make sure you have the latest version, follow the instructions in the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html#installing-eksctl). + +2. 
Create an Amazon EKS cluster by following the instructions on the **eksctl** tab in the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html). In Step 1 of those instructions, use the `eksctl` `create` `cluster` command shown in the **Cluster with Linux‑only workloads** section. + + + + +## Pushing the NGINX Plus Ingress Controller Image to AWS ECR + +1. Create a repository in the Amazon Elastic Container Registry (ECR) using the instructions in the [AWS documentation](https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-create.html). In this guide, we name the repository nginx-plus-ic in Step 5 of the AWS instructions. + +2. Run the following AWS CLI command. It generates an authorization token for your AWS ECR registry and outputs the `docker` `login` command for logging into the registry. For details about the command, see the [AWS documentation](https://docs.aws.amazon.com/cli/latest/reference/ecr/get-login.html). + + ```shell + aws ecr get-login --no-include-email --region + ``` + +3. Run the `docker` `login` command generated in Step 2. + +4. Run the following command to apply the tag `edge` to your NGINX Plus Ingress Controller image, where: + + - `` is the value you specified with the `PREFIX` parameter to the `make` `container` command you ran to create the NGINX Plus Ingress Controller image (see [Prerequisites](#prereqs)). In this guide it is `nginx/nginx-plus-ingress`. + - `` is your AWS account number. For instructions on retrieving the ID, see the [AWS documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/console_account-alias.html). + - `` is the same region name you specified in Step 2 above. + - `` is the AWS ECR repository you created in Step 1 above. In this guide it is called `nginx-plus-ic`. 
+ + ```shell + docker tag :edge .dkr.ecr..amazonaws.com/:edge + ``` + + So in this guide, the command is + + ```shell + docker tag nginx/nginx-plus-ingress:edge .dkr.ecr..amazonaws.com/nginx-plus-ic:edge + ``` + +5. Push the NGINX Plus Ingress Controller image to AWS ECR: + + ```shell + docker push .dkr.ecr..amazonaws.com/:edge + ``` + + +## Installing the NGINX Plus Ingress Controller +Install your NGINX Plus Ingress Controller image in the Amazon EKS cluster you created in [Creating an Amazon EKS Cluster](#amazon-eks), using the instructions in [our documentation](https://docs.nginx.com/nginx-ingress-controller/installation/installation-with-manifests/). + + +## Using NLB in Front of the NGINX Plus Ingress Controller + +By default, Amazon EKS uses [Classic Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/introduction.html) for Kubernetes services of type `LoadBalancer`. We recommend that you use [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) (NLB) instead, and this section provides instructions for configuring it. We also recommend that you enable the PROXY Protocol for both the NGINX Plus Ingress Controller and your NLB target groups. If you choose not to enable the PROXY protocol, see the [Appendix](#appendix). + +We assume you performed all the steps in the instructions referenced in [Installing the NGINX Plus Ingress Controller](#ingress-controller), and [built a service for your NGINX Plus Ingress Controller](https://docs.nginx.com/nginx-ingress-controller/installation/installation-with-manifests/#create-a-service-for-the-ingress-controller-pods). If you created a `LoadBalancer` service, you can either edit its configuration or add a new `LoadBalancer` service. If you created a `NodePort` service, you must add a new `LoadBalancer` service now. + +### Configuring a `LoadBalancer` Service to Use NLB + + +1. 
In service/loadbalancer-aws-elb.yaml, add the following annotation to the existing or new `LoadBalancer` service:
+
+   ```yaml
+   service.beta.kubernetes.io/aws-load-balancer-type: nlb
+   ```
+
+2. Run the following command:
+
+   ```shell
+   kubectl apply -f service/loadbalancer-aws-elb.yaml
+   ```
+
+### Enabling the PROXY Protocol
+
+1. Add the following keys to the common/nginx-config.yaml config map file:
+
+   ```yaml
+   proxy-protocol: "True"
+   real-ip-header: "proxy_protocol"
+   set-real-ip-from: "0.0.0.0/0"
+   ```
+
+2. Run the following command to update the config map:
+
+   ```shell
+   kubectl apply -f common/nginx-config.yaml
+   ```
+
+3. Enable the PROXY Protocol on the target group associated with the NLB created for your `LoadBalancer` service, by performing the steps in the **Enable Proxy Protocol** section of the [AWS documentation](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-target-groups.html#proxy-protocol).
+
+
+## Testing
+
+1. Follow the [instructions](https://github.com/nginxinc/kubernetes-ingress/tree/master/examples/complete-example) for setting up our sample deployment of a demo app load balanced by the NGINX Plus Ingress controller.
+
+2. Navigate in a browser to **** (or issue the `curl` command against that URL).
+
+3. Run the following command to display the name of the running Ingress pod:
+
+   ```shell
+   kubectl get pods --namespace=nginx-ingress
+   ```
+
+4. Run the following command to display the logs from the NGINX Plus Ingress Controller, where `` is the name you learned in the previous step. If the logged IP address matches the IP address from which you accessed ** .example.com/coffee**, the PROXY Protocol is enabled.
+
+   ```shell
+   kubectl logs nginx-ingress- -n nginx-ingress
+   ```
+
+
+## Appendix: Disabling the PROXY Protocol
+
+If you choose to disable the PROXY Protocol, perform these steps.
+
+1. 
If the common/nginx-config.yaml config map file includes these keys, remove them: + + ```yaml + proxy-protocol: "True" + real-ip-header: "proxy_protocol" + set-real-ip-from: "0.0.0.0/0" + ``` + + +2. Run the following command to update the config map: + + ```shell + kubectl apply -f common/nginx-config.yaml + ``` + +3. In the service/loadbalancer-aws-elb.yaml service file, add the `externalTrafficPolicy` key in the `spec` section and set it to `Local`, as in this example: + + ```yaml + apiVersion: v1 + kind: Service + metadata: + name: nginx-ingress-nlb + namespace: nginx-ingress + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp" + service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*" + service.beta.kubernetes.io/aws-load-balancer-type: nlb + spec: + externalTrafficPolicy: Local + type: LoadBalancer + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + - port: 443 + targetPort: 443 + protocol: TCP + name: https + selector: + app: nginx-ingress + ``` + +4. Run the following command to update the service: + + ```shell + kubectl apply -f service/loadbalancer-aws-elb.yaml + ``` + +### Revision History + +- Version 1 (March 2020) – Initial version (NGINX Plus Release 20) + diff --git a/content/nginx/deployment-guides/amazon-web-services/route-53-global-server-load-balancing.md b/content/nginx/deployment-guides/amazon-web-services/route-53-global-server-load-balancing.md new file mode 100644 index 000000000..b04f7f557 --- /dev/null +++ b/content/nginx/deployment-guides/amazon-web-services/route-53-global-server-load-balancing.md @@ -0,0 +1,479 @@ +--- +description: Deploy global server load balancing (GSLB) for domains hosted in multiple + AWS regions, with Amazon Route 53 and F5 NGINX Plus in an HA configuration. 
+docs: DOCS-448 +doctypes: +- task +title: Global Server Load Balancing with Amazon Route 53 and NGINX Plus +toc: true +weight: 300 +--- + +This deployment guide explains how to configure global server load balancing (GSLB) of traffic for web domains hosted in Amazon [Elastic Compute Cloud](https://aws.amazon.com/ec2/) (EC2). For high availability and improved performance, you set up multiple backend servers (web servers, application servers, or both) for a domain in two or more AWS regions. Within each region, NGINX Plus load balances traffic across the backend servers. + +The AWS Domain Name System (DNS) service, [Amazon Route 53](https://aws.amazon.com/route53/), performs global server load balancing by responding to a DNS query from a client with the DNS record for the region that is closest to the client and hosts the domain. For best performance and predictable failover between regions, "closeness" is measured in terms of network latency rather than the actual geographic location of the client. + +The [Appendix](#appendix) provides instructions for creating EC2 instances with the names used in this guide, and installing and configuring the F5 NGINX software on them. + + +## About NGINX Plus + +[NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) is the commercially supported version of the [NGINX Open Source](https://nginx.org/en) software. 
NGINX Plus is a complete application delivery platform, extending the power of NGINX with a host of enterprise‑ready capabilities that enhance an AWS application server deployment and are instrumental to building web applications at scale: + +- [Full‑featured HTTP, TCP, and UDP load balancing](https://www.nginx.com/products/nginx/load-balancing/) +- [Intelligent session persistence](https://www.nginx.com/products/nginx/load-balancing/#session-persistence) +- [High‑performance reverse proxy]({{< relref "../../admin-guide/web-server/reverse-proxy.md" >}}) +- [Caching and offload of dynamic and static content]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}) +- [Adaptive streaming to deliver audio and video to any device](https://www.nginx.com/products/nginx/streaming-media/) +- [Application-aware health checks](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/) and [high availability](https://docs.nginx.com/nginx/admin-guide/high-availability/) +- [Advanced activity monitoring available via a dashboard or API](https://www.nginx.com/products/nginx/live-activity-monitoring/) +- [Management and real‑time configuration changes with DevOps‑friendly tools](https://www.nginx.com/products/nginx/load-balancing/#load-balancing-api) + + +## Topology for Global Load Balancing with Amazon Route 53 and NGINX Plus + +The setup for global server load balancing (GSLB) in this guide combines Amazon Elastic Compute Cloud (EC2) instances, Amazon Route 53, NGINX Plus instances, and NGINX Open Source instances. + +**Note:** [Global server load balancing](https://www.nginx.com/resources/glossary/global-server-load-balancing/) is also sometimes called _global load balancing_ (GLB). The terms are used interchangeably in this guide. + +Diagram showing a topology for global server load balancing (GSLB). Eight backend servers, four in each of two regions, host the content for a domain. 
Two NGINX Plus load balancers in each region route traffic to the backend servers. For each client requesting DNS information for the domain, Amazon Route 53 provides the DNS record for the region closest to the client. + +Route 53 is a Domain Name System (DNS) service that performs global server load balancing by routing each request to the AWS region closest to the requester's location. This guide uses two regions: US West (Oregon) and US East (N. Virginia). + +In each region, two or more NGINX Plus load balancers are deployed in a high‑availability (HA) configuration. In this guide, there are two NGINX Plus load balancer instances per region. You can also use NGINX Open Source for this purpose, but it lacks the [application health checks](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/) that make for more precise error detection. For simplicity, we'll refer to NGINX Plus load balancers throughout this guide, noting when features specific to NGINX Plus are used. + +The NGINX Plus instances load balance traffic across web or app servers in their region. The diagram shows four backend servers, but you can deploy as many as needed. In this guide, there are two NGINX Open Source web servers in each region (four total); each one serves a customized static page identifying the server so you can track how load balancing is working. + +Health checks are an integral feature of the configuration. Route 53 monitors the health of each NGINX Plus load balancer, marking it as down if a connection attempt times out or the HTTP response code is not `200 OK`. Similarly, the NGINX Plus load balancer monitors the health of the upstream web servers and propagates those errors to Route 53. If both web servers or both NGINX Plus instances in a region are down, Route 53 fails over to the other region. 
+ + +## Prerequisites + +The instructions assume you have the following: + +- [An AWS account](http://docs.aws.amazon.com/AmazonSimpleDB/latest/DeveloperGuide/AboutAWSAccounts.html). +- An NGINX Plus subscription, either [purchased](https://www.nginx.com/products/pricing/#nginx-plus) or a [free 30-day trial](https://www.nginx.com/free-trial-request/). +- Familiarity with NGINX and NGINX Plus configuration syntax. Complete configuration snippets are provided, but not analyzed in detail. +- Eight EC2 instances, four in each of two regions. The [Appendix](#appendix) provides instructions for creating instances with the expected names, and installing and configuring NGINX Plus and NGINX Open Source as appropriate. + + +## Configuring Global Server Load Balancing + +With the [required AWS configuration](#prereqs) in place, we're ready to configure Route 53 for global server load balancing. + +Complete step‑by‑step instructions are provided in the following sections: + +- [Creating a Hosted Zone](#hosted-zone) +- [Linking the Domain to EC2 Instances](#link-instances) +- [Configuring Health Checks for Route 53 Failover](#route-53-health-checks) +- [Configuring NGINX Plus Application Health Checks](#nginx-plus-health-checks) + + +### Creating a Hosted Zone + +Create a _hosted zone_, which basically involves designating a domain name to be managed by Route 53. As covered in the instructions, you can either use (transfer) an existing domain name or purchase a new one from Amazon. + +**Note**: When you transfer an existing domain, it can take up to 48 hours for the updated DNS record to propagate. The propagation time is usually much shorter for a new domain. + +1. Log in to the [AWS Management Console](https://console.aws.amazon.com/) (**console.aws.amazon.com/**). + +2. Access the Route 53 dashboard page by clicking **Services** in the top AWS navigation bar, mousing over **Networking** in the All AWS Services column and then clicking **Route 53**. 
+ + Screenshot showing how to access the Amazon Route 53 dashboard to configure global load balancing (GLB) with NGINX Plus + +3. Depending on your history working with Route 53 instances, you might see the **Route 53 Dashboard**. Navigate to the **Registered domains** tab, shown here: + + Screenshot showing the Route 53 Registered domains tab during configuration of NGINX GSLB (global server load balancing) + + If you see the Route 53 home page instead, access the **Registered domains** tab by clicking the  Get started now  button under Domain registration. + + Screenshot showing the Amazon Route 53 homepage for a first-time Route 53 user during configuration of AWS GSLB (global server load balancing) with NGINX Plus + +4. On the **Registered domains** tab (the first figure in the Step 3), click either the  Register Domain  or  Transfer Domain  button as appropriate and follow the instructions. +5. When you register or transfer a domain, AWS by default creates a hosted zone for it. To verify that there is a hosted zone for your domain, navigate to the **Hosted Zones** tab on the Route 53 dashboard. This example shows the domain we registered for this guide, nginxroute53.com. + + Screenshot showing a newly registered hosted zone during configuration of Route 53 global load balancing (GLB) with NGINX Plus + + (If no domain is listed, click the  Create Hosted Zone  button. The Create Hosted Zone column opens on the right side of the tab. Type the domain name in the **Domain Name** field and click the  Create  button at the bottom of the column.) + + +### Linking the Domain to EC2 Instances + +Now we link the domain to our EC2 instances so that content for the domain can be served from them. We do this by creating Route 53 _record sets_ for the domain. To implement GSLB, in the record sets we specify **Latency** as the _routing policy_. 
This means that in response to a client query for DNS information about a domain, Route 53 sends the DNS records for the region hosting that domain in which servers are responding most quickly to addresses in the client's IP range.
+
+You can also select **Geolocation** as the routing policy, but failover might not work as expected. With the **Geolocation** routing policy, only clients from a specific continent or country can access the content on a server. If you specify the United States, you also have the option of specifying a state as the "sublocation", limiting access to users from only that state. In this case, you can also create a generic US location to catch all requests that aren't sent from a listed sublocation.
+
+We recommend that you choose **Geolocation** as the routing policy only for particular use cases, for example when you want to customize content for users in different countries – written in the country's official language with prices in its currency, say. If your goal is to deliver content as quickly as possible, **Latency** remains the best routing policy.
+
+Create record sets for your domain:
+
+1. If you're not already on the **Hosted Zones** tab of the Route 53 dashboard, navigate to it.
+
+2. Click on the domain name in the **Domain Name** row for your hosted zone.
+
+   Screenshot showing how to access the Create Record Set interface for Route 53 hosted zone during configuration of AWS global load balancing (GLB) with NGINX Plus
+
+   The tab changes to display the record sets for the domain.
+
+3. Click the  Create Record Set  button. The Create Record Set column opens on the right side of the tab, as shown here:
+
+   Screenshot showing Create Record Set interface in Route 53 during configuration of global load balancing (GLB) with NGINX Plus
+
+4. Fill in the fields in the **Create Record Set** column:
+
+   - **Name** – You can leave this field blank, but for this guide we are setting the name to www.nginxroute53.com.
+ - **Type** – A – IPv4 address. + - **Alias** – No. + - **TTL (Seconds)** – 60. + + **Note**: Reducing TTL from the default of 300 in this way can decrease the time that it takes for Route 53 to fail over when both NGINX Plus load balancers in the region are down, but there is always a delay of about two minutes regardless of the TTL setting. This is a built‑in limitation of Route 53. + + - **Value** – [Elastic IP addresses](#elastic-ip) of the NGINX Plus load balancers in the first region [in this guide, US West (Oregon)]. + - **Routing Policy** – Latency. + +5. A new area opens when you select Latency. Fill in the fields as indicated (see the figure below): + + - **Region** – Region to which the load balancers belong (in this guide, us-west-2). + - **Set ID** – Identifier for this group of load balancers (in this guide, US West LBs). + - **Associate with Health Check** – No. + + When you complete all fields, the tab looks like this: + + Screenshot showing a completed Route 53 record set during configuration of global server load balancing (GSLB) with NGINX Plus + +6. Click the  Create  button. + +7. Repeat Steps 3 through 6 for the load balancers in the other region [in this guide, US East (N. Virginia)]. + +You can now test your website. Insert your domain name into a browser and see that your request is being load balanced between servers based on your location. + + +### Configuring Health Checks for Route 53 Failover + +To trigger failover between AWS regions, we next configure health checks in Route 53. Route 53 monitors the NGINX Plus load balancers and fails over to the next closest region if both NGINX Plus load balancers are timing out or returning HTTP status codes other than `200 OK`. (In this guide, failover is to the other region, since there are only two.) + +**Note:** It can take up to **three minutes** for Route 53 to begin routing traffic to another region. 
Although the TTL you specify in the record set for a region can slightly affect the amount of time, failover is never instantaneous because of the processing Route 53 must perform. + +Diagram showing failover between AWS regions when Amazon Route 53 is configured for global server load balancing (GSLB) with NGINX Plus + +We create health checks both for each NGINX Plus load balancer individually and for the load balancers in each region as a pair. Then we update the record sets created in the previous section to refer to the health checks. + + +- [Configuring Route 53 Health Checks for Individual Load Balancers](#route-53-health-checks-individual) +- [Configuring Route 53 Health Checks for the Paired Load Balancers in Each Region](#route-53-health-checks-pair) +- [Modifying Record Sets to Associate Them with the Newly Defined Health Checks](#route-53-health-checks-record-sets) + + +#### Configuring Route 53 Health Checks for Individual Load Balancers + +1. Navigate to the **Health checks** tab on the Route 53 dashboard. + + Screenshot of Amazon Route 53 welcome screen seen by first-time user of Route 53 during configuration of global server load balancing (GSLB) with NGINX Plus + +2. Click the  Create health check  button. In the Configure health check form that opens, specify the following values, then click the  Next  button. + + - **Name** – Identifier for an NGINX Plus load balancer instance, for example US West LB 1. + - **What to monitor** – Endpoint. + - **Specify endpoint by** – IP address. + - **IP address** – The [elastic IP address](#elastic-ip) of the NGINX Plus load balancer. + - **Port** – The port advertised to clients for your domain or web service (the default is 80). + + Screenshot of Amazon Route 53 interface for configuring health checks, during configuration of AWS global load balancing (GLB) with NGINX Plus + +3. 
On the Get notified when health check fails screen that opens, set the **Create alarm** radio button to **Yes** or **No** as appropriate, then click the  Create health check  button. + + Screenshot of Route 53 configuration screen for enabling notifications of failed health checks, during configuration of Route 53 global load balancing (GLB) with NGINX Plus + +4. Repeat Steps 2 and 3 for your other NGINX Plus load balancers (in this guide, US West LB 2, US East LB 1, and US East LB 2). + +5. Proceed to the next section to configure health checks for the load balancer pairs. + + +#### Configuring Route 53 Health Checks for the Paired Load Balancers in Each Region + +1. Click the  Create health check  button. + +2. In the Configure health check form that opens, specify the following values, then click the  Next  button. + + - **Name** – Identifier for the pair of NGINX Plus load balancers in the first region, for example US West LBs. + - **What to monitor** – Status of other health checks . + - **Health checks to monitor** – The health checks of the two US West load balancers (add them one after the other by clicking in the box and choosing them from the drop‑down menu as shown). + - **Report healthy when** – at least 1 of 2 selected health checks are healthy (the choices in this field are obscured in the screenshot by the drop‑down menu). + + Screenshot of Amazon Route 53 interface for configuring a health check of combined other health checks, during configuration of global server load balancing (GSLB) with NGINX Plus + +3. On the Get notified when health check fails screen that opens, set the **Create alarm** radio button as appropriate (see Step 5 in the previous section), then click the  Create health check  button. + +4. Repeat Steps 1 through 3 for the paired load balancers in the other region [in this guide, US East (N. Virginia)]. 
+
+When you have finished configuring all six health checks, the **Health checks** tab looks like this:
+
+Screenshot showing six successfully configured health checks in Amazon Route 53, during configuration of NGINX Plus for GSLB (global server load balancing)
+
+
+#### Modifying Record Sets to Associate Them with the Newly Defined Health Checks
+
+1. Navigate to the **Hosted Zones** tab.
+
+2. Click on the domain name in the **Domain Name** row for your hosted zone.
+
+   Screenshot showing how to access the Create Record Set interface for Route 53 hosted zone during configuration of AWS global load balancing (GLB) with NGINX Plus
+
+   The tab changes to display the record sets for the domain.
+
+3. In the list of record sets that opens, click the row for the record set belonging to your first region [in this guide, US West (Oregon)]. The Edit Record Set column opens on the right side of the tab.
+
+   Screenshot of interface for editing Route 53 record sets during configuration of global server load balancing (GSLB) with NGINX Plus
+
+4. Change the **Associate with Health Check** radio button to Yes.
+
+5. In the **Health Check to Associate** field, select the paired health check for your first region (in this guide, US West LBs).
+
+6. Click the  Save Record Set  button.
+
+
+### Configuring NGINX Plus Application Health Checks
+
+When you are using the NGINX Plus load balancer, we recommend that you configure [application health checks](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/) of your backend servers. You can configure NGINX Plus to check more than simply whether a server is responding or returning `5xx` – for example, checking whether the content returned by the server is correct. When a server fails a health check, NGINX Plus removes it from the load‑balancing rotation until it passes a configured number of consecutive health checks. 
If all backend servers are down, NGINX Plus sends a `5xx` error to Route 53, which triggers a failover to another region.
+
+These instructions assume that you have configured NGINX Plus on two EC2 instances in each region, as instructed in [Configuring NGINX Plus on the Load Balancers](#configure-load-balancers).
+
+**Note:** Some commands require `root` privilege. If appropriate for your environment, prefix commands with the `sudo` command.
+
+1. Connect to the US West LB 1 instance. For instructions, see Connecting to an EC2 Instance.
+
+2. Change directory to **/etc/nginx/conf.d**.
+
+   ```shell
+   cd /etc/nginx/conf.d
+   ```
+
+3. Edit the west-lb1.conf file and add the **@healthcheck** location to set up health checks.
+
+   ```nginx
+   upstream backend-servers {
+       server ; # Backend 1
+       server ; # Backend 2
+       zone backend-servers 64k;
+   }
+
+   server {
+       location / {
+           proxy_pass http://backend-servers;
+       }
+
+       location @healthcheck {
+           proxy_pass http://backend-servers;
+           proxy_connect_timeout 1s;
+           proxy_read_timeout 1s;
+           health_check uri=/ interval=1s;
+       }
+   }
+   ```
+
+   Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_connect_timeout](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_connect_timeout), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [proxy_read_timeout](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout), [server virtual](https://nginx.org/en/docs/http/ngx_http_core_module.html#server), [server upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone)
+
+4. Verify the validity of the NGINX configuration and load it.
+
+   ```shell
+   nginx -t
+   nginx -s reload
+   ```
+
+5. 
Repeat Steps 1 through 4 for the other three load balancers (US West LB 2, US East LB 1, and US East LB 2).
+
+   In Step 3, change the filename as appropriate (west-lb2.conf, east-lb1.conf, and east-lb2.conf). In the east-lb1.conf and east-lb2.conf files, the `server` directives specify the public DNS names of Backend 3 and Backend 4.
+
+
+## Appendix
+
+The instructions in this Appendix explain how to create EC2 instances with the names used in this guide, and then install and configure NGINX Open Source and NGINX Plus on them:
+
+- [Creating EC2 Instances and Installing the NGINX Software](#create-instance-install-nginx)
+- [Configuring Elastic IP Addresses](#elastic-ip)
+- [Configuring NGINX Open Source on the Backend Servers](#configure-backend-servers)
+- [Configuring NGINX Plus on the Load Balancers](#configure-load-balancers)
+
+
+### Creating EC2 Instances and Installing the NGINX Software
+
+The deployment in this guide uses eight EC2 instances, four in each of two AWS regions. In each region, two instances run NGINX Plus to load balance traffic to the backend (NGINX Open Source) web servers running on the other two instances.
+
+Step‑by‑step instructions for creating EC2 instances and installing NGINX software are provided in our deployment guide, [Creating Amazon EC2 Instances for NGINX Open Source and NGINX Plus]({{< relref "ec2-instances-for-nginx.md" >}}).
+
+**Note:** When installing NGINX Open Source or NGINX Plus, you connect to each instance over SSH. To save time, leave the SSH connection to each instance open after installing the software, for reuse when you configure it with the instructions in the sections below.
+
+Assign the following names to the instances, and then install the indicated NGINX software. 
+ +- In the first region, which is US West (Oregon) in this guide: + + - Two load balancer instances running NGINX Plus: + + - US West LB 1 + - US West LB 2 + + - Two backend instances running NGINX Open Source: + + * Backend 1 + - Backend 2 + +- In the second region, which is US East (N. Virginia) in this guide: + + - Two load balancer instances running NGINX Plus: + + - US East LB 1 + - US East LB 2 + + - Two backend instances running NGINX Open Source: + + * Backend 3 + - Backend 4 + +Here's the **Instances** tab after we create the four instances in the N. Virginia region. + +Screenshot showing newly created EC2 instances in one of two regions, which is a prerequisite to configuring AWS GSLB (global server load balancing) with NGINX Plus + + +### Configuring Elastic IP Addresses + +For some EC2 instance types (for example, on‑demand instances), AWS by default assigns a different IP address to an instance each time you shut it down and spin it back up. A load balancer must know the IP addresses of the servers to which it is forwarding traffic, so the default AWS behavior requires you either to set up a service‑discovery mechanism or to modify the NGINX Plus configuration every time you shut down or restart a backend instance. (A similar requirement applies to Route 53 when we shut down or restart an NGINX Plus instance.) To get around this inconvenience, assign an _elastic IP address_ to each of the eight instances. + +**Note:** AWS does not charge for elastic IP addresses as long as the associated instance is running. But when you shut down an instance, AWS charges a small amount to maintain the association to an elastic IP address. For details, see the Amazon EC2 Pricing page for your pricing model (for example, the **Elastic IP Addresses** section of the [On‑Demand Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) page). + +Perform these steps on all eight instances. + +1. Navigate to the **Elastic IPs** tab on the EC2 Dashboard. 
+ + Screenshot of the Elastic IPs tab used during configuration of a new AWS EC2 instance, which is a prerequisite to configuring NGINX GSLB (global server load balancing) + +2. Click the  Allocate New Address  button. In the window that pops up, click the  Yes, Allocate  button and then the  Close  button. + +3. Associate the elastic IP address with an EC2 instance: + + - Right‑click in the IP address' row and select  Associate Address  from the drop‑down menu that appears. + - In the window that pops up, click in the **Instance** field and select an instance from the drop‑down menu. + + Confirm your selection by clicking the  Associate  button. + + Screenshot of the interface for associating an AWS EC2 instance with an elastic IP address, which is a prerequisite to configuring AWS global load balancing (GLB) with NGINX Plus + +After you complete the instructions on all instances, the list for a region (here, Oregon) looks like this: + +Screenshot showing the elastic IP addresses assigned to four AWS EC2 instances during configuration of global server load balancing (GSLB) with NGINX Plus + + +### Configuring NGINX Open Source on the Backend Servers + +Perform these steps on all four backend servers: Backend 1, Backend 2, Backend 3, and Backend 4. In Step 3, substitute the appropriate name for `Backend X` in the **index.html** file. + +**Note:** Some commands require `root` privilege. If appropriate for your environment, prefix commands with the `sudo` command. + +1. Connect over SSH to the instance (or return to the terminal you left open after installing NGINX Open Source) and change directory to and change directory to your root directory. For the instance in this guide, it is **/home/ubuntu**. + + ```shell + cd /home/ubuntu + ``` + +2. Create a directory called **public_html** and change directory to it. + + ```shell + mkdir public_html + cd public_html + ``` + +3. 
Using your preferred text editor, create a new file called **index.html** and add this text to it: + + ```none + This is Backend X + ``` + +4. Change directory to **/etc/nginx/conf.d**. + + ```shell + cd /etc/nginx/conf.d + ``` + +5. Rename **default.conf** to **default.conf.bak** so that NGINX Plus does not load it. + + ```shell + mv default.conf default.conf.bak + ``` + +6. Create a new file called **backend.conf** and add this text, which defines the docroot for this web server: + + ```nginx + server { + root /home/ubuntu/public_html; + } + ``` + + Directive documentation: [root](https://nginx.org/en/docs/http/ngx_http_core_module.html#root), [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server) + +7. Verify the validity of the NGINX configuration and load it. + + ```shell + nginx -t + nginx -s reload + ``` + + +### Configuring NGINX Plus on the Load Balancers + +Perform these steps on all four backend servers: US West LB 1, US West LB 2, US East LB 1, and US West LB 2. + +**Note:** Some commands require `root` privilege. If appropriate for your environment, prefix commands with the `sudo` command. + +1. Connect over SSH to the instance (or return to the terminal you left open after installing NGINX Plus) and change directory to **/etc/nginx/conf.d**. + + ```shell + cd /etc/nginx/conf.d + ``` + +3. Rename **default.conf** to **default.conf.bak** so that NGINX Plus does not load it. + + ```shell + mv default.conf default.conf.bak + ``` + +4. Create a new file containing the following text, which configures load balancing of the two backend servers in the relevant region. The filename on each instance is: + + - For US West LB 1west-lb1.conf + - For US West LB 2west-lb2.conf + - For US East LB 1east-lb1.conf + - For US West LB 2east-lb2.conf + + In the `server` directives in the `upstream` block, substitute the public DNS names of the backend instances in the region; to learn them, see the **Instances** tab in the EC2 Dashboard. 
+ + ```nginx + upstream backend-servers { + server ; # Backend 1 + server ; # Backend 2 + } + server { + location / { + proxy_pass http://backend-servers; + } + } + ``` + + Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [server virtual](https://nginx.org/en/docs/http/ngx_http_core_module.html#server), [server upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + +5. Verify the validity of the NGINX configuration and load it. + + ```shell + nginx -t + nginx -s reload + ``` + +8. To test that the configuration is working correctly, for each load balancer enter its public DNS name in the address field of your web browser. As you access the load balancer repeatedly, the content of the page alternates between "This is Backend 1" and "This is Backend 2" in your first region, and "This is Backend 3" and "This is Backend 4" in the second region. + +Now that all eight EC2 instances are configured and local load balancing is working correctly, we can set up global server load balancing with Route 53 to route traffic based on the IP address of the requesting client. 
+ +Return to main instructions, [Configuring Global Server Load Balancing](#gslb) + +### Revision History + +- Version 3 (April 2018) – Reorganization of Appendix +- Version 2 (January 2017) – Clarified information about root permissions; miscellaneous fixes (NGINX Plus Release 11) +- Version 1 (October 2016) – Initial version (NGINX Plus Release 10) + diff --git a/content/nginx/deployment-guides/global-server-load-balancing/_index.md b/content/nginx/deployment-guides/global-server-load-balancing/_index.md new file mode 100644 index 000000000..f172f95eb --- /dev/null +++ b/content/nginx/deployment-guides/global-server-load-balancing/_index.md @@ -0,0 +1,9 @@ +--- +description: Configure global server load balancing (GSLB) for websites and applications + proxied by F5 NGINX Plus. +menu: + docs: + parent: NGINX Plus +title: Global Server Load Balancing +weight: 100 +--- diff --git a/content/nginx/deployment-guides/global-server-load-balancing/ns1-global-server-load-balancing.md b/content/nginx/deployment-guides/global-server-load-balancing/ns1-global-server-load-balancing.md new file mode 100644 index 000000000..a6c6cbec5 --- /dev/null +++ b/content/nginx/deployment-guides/global-server-load-balancing/ns1-global-server-load-balancing.md @@ -0,0 +1,482 @@ +--- +description: Deploy global server load balancing (GSLB) for domains registered with + DNS services provider NS1 and proxied by F5 NGINX Plus. +docs: DOCS-449 +doctypes: +- task +title: Global Server Load Balancing with NS1 and NGINX Plus +toc: true +weight: 100 +--- + +Global server load balancing (GSLB) refers to the intelligent distribution of traffic across server resources located in multiple points of presence (PoPs). GSLB is most commonly implemented by controlling the responses to DNS requests, directing each user to the most appropriate destination IP address based on the availability, performance, and proximity of each PoP. + +Many DNS providers offer some form of GSLB. 
[NS1](https://www.ns1.com) has one of the most advanced solutions available as a service, with a rich API that PoPs can use to dynamically inform the NS1 servers about their availability and current loads. + +This document describes how to use NGINX’s NS1 agent to interface with the NS1 service, enabling sophisticated GSLB across multiple PoPs that are powered by F5 NGINX Plus. You can colocate an instance of the agent alongside each NGINX Plus instance, or configure an agent to query one or more NGINX Plus instances remotely. (This guide covers only the colocation option.) + +The agent periodically queries the NGINX Plus API for several metrics that it uses to calculate the current number of active connections and load average on the NGINX Plus instance, and reports those metrics to NS1. The agent can also be configured to report the status of the site as ``up:`` ``true`` or ``up:`` ``false`` (that is, down). + +The agent supports the following capabilities: + +- Remote health checks, so clients are not directed to an unavailable (down or otherwise unreachable) PoP +- Local capacity checks, so clients are not directed to a PoP without enough healthy servers +- Central capacity balancing, so clients are balanced across PoPs according to the current load at each PoP, and traffic is drained from PoPs that are overloaded + +The solution functions alongside other NS1 capabilities, such as geo‑proximal routing which directs each client to the closest PoP. + + +## About NGINX Plus + +[NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) is the commercially supported version of [NGINX Open Source](https://nginx.org/en). 
NGINX Plus is a complete application delivery platform, extending the power of NGINX with a host of enterprise‑ready capabilities that are instrumental to building web applications at scale: + +- [Full‑featured HTTP, TCP, and UDP load balancing](https://www.nginx.com/products/nginx/load-balancing/) +- [Intelligent session persistence](https://www.nginx.com/products/nginx/load-balancing/#session-persistence) +- [High‑performance reverse proxy]({{< relref "../../admin-guide/web-server/reverse-proxy.md" >}}) +- [Caching and offload of dynamic and static content]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}) +- [Adaptive streaming to deliver audio and video to any device](https://www.nginx.com/products/nginx/streaming-media/) +- [Application-aware health checks](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/) and [high availability](https://docs.nginx.com/nginx/admin-guide/high-availability/) +- [Advanced activity monitoring available via a dashboard or API](https://www.nginx.com/products/nginx/live-activity-monitoring/) +- [Management and real‑time configuration changes with DevOps‑friendly tools](https://www.nginx.com/products/nginx/load-balancing/#load-balancing-api) + + +## Prerequisites + +- A registered domain name +- An NS1 account +- Three or more deployed NGINX Plus instances, each with: + - The NGINX Plus API [enabled](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api) + - Go 1.7 or later [installed](https://golang.org/doc/install) + + +## Setting Up NS1 + +1. Log in to your NS1 account, click  ZONES  in the title bar to open the **Your Zones** page, and click the + Add Zone button in the upper right corner. + + Screenshot of NS1 GUI: ZONES tab + +2. In the **Add Zone** pop‑up window, enter the domain name (**nginxgslb.cf** in this guide) in the **Domain Name** field. 
We're not changing any of the default settings, but see the [NS1 documentation](https://help.ns1.com/hc/en-us/articles/360022250193) for information about TTL (time-to-live) settings. Click the Save Zone button. + + Screenshot of NS1 GUI: Add Zone popup + +3. On the page that opens, click the  NAMESERVERS  tab and follow [these instructions](https://help.ns1.com/hc/en-us/articles/360016306973-Delegating-a-domain-to-NS1) to delegate the new domain name to NS1. + + Screenshot of NS1 GUI: NAMESERVERS page + +4. Click the  RECORDS  tab. As shown in the screenshot, an ``NS`` (Name Server) record has already been created automatically and appears in the white box. Click either Add Record button. + + Screenshot of NS1 GUI: RECORDS page + +5. The **Add Record** window pops up. Enter the following values: + + - **Record Type** – A (the default). + - name – Leave blank unless you are creating the ``A`` record for a subdomain. + - **TTL** – 3600 is the default, which we are not changing. + - **ANSWERS** – The public IP address of the first NGINX Plus instance. To add each of the other instances, click the Add Answer button. (In this guide we're using private IP addresses in the 10.0.0.0/8 range as examples.) + + Click the  Save All Changes  button. + + Screenshot of NS1 GUI: Add Record popup + +6. The new ``A`` record appears on the  RECORDS  tab. Click the  3 Answers  button at the right end of its row to open the details page for the ``A`` record. (The screenshot – like subsequent screenshots of this page and the details page – shows only the bottom half of the tab.) + + Screenshot of NS1 GUI: NS and A records for nginxgslb.cf + +7. The window that opens shows details for the ``A`` record. The IP addresses of the NGINX Plus instances appear in the **Ungrouped Answers** section. Click the stacked dots icon at the right end of the field for the first address (10.10.10.1 in this guide) and select Edit Answer Metadata. 
+ + Screenshot of NS1 GUI: list of answers for nginxgslb.cf + +8. In the **Answer Metadata** window that pops up, click  Up/down  in the  STATUS  section of the  SETTING  column, if it is not already selected. Click the **Select** box in the  AVAILABLE  column, and then select either Up or Down from the drop‑down menu. In this guide we're selecting Up to indicate that the NGINX Plus instance is operational. + Screenshot of NS1 GUI: setting STATUS answer metadata + +9. Click a value in the  GEOGRAPHICAL  section of the  SETTING  column and specify the location of the NGINX Plus instance. Begin by choosing one of the several types of codes that NS1 offers for identifying locations: + + - **Canadian province(s)** – Two‑letter codes for Canadian provinces + - **Country/countries** – Two‑letter codes for nations and territories + - **Geographic region(s)** – Identifiers like US-WEST and ASIAPAC + - **ISO region code** – Identification codes for nations and territories as defined in [ISO 3166](https://www.iso.org/iso-3166-country-codes.html) + - **Latitude** – Degrees, minutes, and seconds of latitude (northern or southern hemisphere) + - **Longitude** – Degrees, minutes, and seconds of longitude (eastern or western hemisphere) + - **US State(s)** – Two‑letter codes for US states + + In this guide we're using **Country/countries** codes. For the first NGINX Plus instance, we select Americas > Northern America > United States (US) and click the  Ok  button. + + Screenshot of NS1 GUI: setting GEOGRAPHICAL answer metadata + +10. Repeat Steps 7–9 for both of the other two NGINX Plus instances. For the country in Step 9, we're selecting Europe > Western Europe > Germany (DE) for NGINX Plus instance 2 and Asia > South‑Eastern Asia > Singapore (SG) for NGINX Plus instance 3. + + When finished with both instances, on the details page for the ``A`` record click the  Save Record  button. + +11. 
Click the  Create Filter Chain  button to create a filter chain based on the **Up/Down** and **Country/countries** metadata (for an overview of filter chains, see the [NS1 documentation](https://ns1.com/ns1-filter-chain)). + + Screenshot of NS1 GUI: creating filter chain + +12. In the **Add Filters** window that pops up, click the plus sign (+) on the button for each filter you want to apply. In this guide, we're configuring the filters in this order: + + - Up in the  HEALTHCHECKS  section + - Geotarget Country in the  GEOGRAPHIC  section + - Select First N in the  TRAFFIC MANAGEMENT  section + + Click the  Save Filter Chain  button. + + Screenshot of NS1 GUI: Add Filters page with three filters defined + + +## Installing the NS1 Agent + +In this section we install and configure the NS1 agent on the same hosts as our NGINX Plus instances. We also create an NS1 _data feed_ for each agent, which enables it to send status information about its paired NGINX Plus instance to NS1 via the NS1 API. + +1. Follow the instructions in the [NS1 documentation](https://help.ns1.com/hc/en-us/articles/360020474154) to set up and connect a separate data feed for each of the three NGINX Plus instances, which NS1 calls _answers_. + + On the first page (**Configure a new data source from NSONE Data Feed API v1**) specify a name for the _data source_, which is the administrative container for the data feeds you will be creating. Use the same name each of the three times you go through the instructions. We're naming the data source NGINX-GSLB. + + On the next page (**Create Feed from NSONE Data Feed API v1**), create a data feed for the instance. Because the **Name** field is just for internal use, any value is fine. The value in the **Label** field is used in the YAML configuration file for the instance (see Step 4 below). 
We're specifying labels that indicate the country (using the ISO 3166 codes) in which the instance is running: + + - us-nginxgslb-datafeed for instance 1 in the US + - de-nginxgslb-datafeed for instance 2 in Germany + - sg-nginxgslb-datafeed for instance 3 in Singapore + + After creating the three feeds, note the value in the **Feeds URL** field on the  INTEGRATIONS  tab. The final element of the URL is the ```` you will specify in the YAML configuration file in Step 4. In the third screenshot in the [NS1 documentation](https://help.ns1.com/hc/en-us/articles/360020474154), for example, it is e566332c5d22c6b66aeaa8837eae90ac. + +2. Follow the instructions in the [NS1 documentation](https://help.ns1.com/hc/en-us/articles/360017341694-Creating-managing-API-keys) to create an NS1 API key for the agent, if you have not already. (To access **Account Settings** in Step 1, click your username in the upper right corner of the NS1 title bar.) We're naming the app NGINX-GSLB. Make note of the key value – you'll specify it as ```` in the YAML configuration file in Step 4. To see the actual hexadecimal value, click on the circled letter **i** in the **API Key** field. + +3. On each NGINX Plus host, clone the [GitHub repo](https://github.com/nginxinc/nginx-ns1-gslb) for the NS1 agent. + +4. On each NGINX Plus host, create the [YAML configuration file](https://github.com/nginxinc/nginx-ns1-gslb/blob/master/configs/README.md) for the NS1 agent. 
In this guide we're using the following file: + + ```none + agent: + interval: 10 + retry_time: 5 + nginx_plus: + hosts: + - host: "127.0.0.1:8000" + resolve: false + host_header: "127.0.0.1:8000" + api_endpoint: "/api" + client_timeout: 10 + nsone: + api_key: "" + client_timeout: 10 + source_id: "" + services: + method: "upstream_groups" + threshold: 2 + sampling_type: "count" + feeds: + - name: "my_backend" + feed_name: "-nginxgslb-datafeed" + ``` + + The ``hosts`` section configures the agent to run on the same host as the NGINX Plus instance from which it collects metrics – in this guide, localhost. Because localhost is identified by its IP address (127.0.0.1) in the ``host`` field, hostname resolution is unnecessary and ``resolve`` is set to ``false``. The agent gathers metrics from the NGINX Plus API (the ``/api`` endpoint) on port 8000. + + In the ``nsone`` section, include the ```` and ```` values you noted in Step 2 and Step 1, respectively. + + In the ``services`` section, we're specifying [upstream_groups](https://github.com/nginxinc/nginx-ns1-gslb/tree/master/configs#services) as the method for the NS1 agent to use, meaning that it collects metrics about the upstream group that the NGINX Plus instance is load balancing – ``my_backend``, as specified in the ``name`` field of the ``feeds`` section. The ``threshold`` field defines how many servers in the upstream group must be healthy for the backend app to be considered up, and the ``sampling_type`` field tells the agent to collect the sum of active connections to backend servers. (We're leaving actual setup of the backend app and the configuration file for the NGINX Plus instance as exercises for the reader.) + + The agent configuration is the same for each of the paired agents and NGINX Plus instances, except for the value in the ``feed_name`` field (see Step 1 for the feed names). If you choose to configure a different upstream group for each instance, also change the value in the ``name`` field. 
+ + For details about all fields in the configuration file, see the documentation in our [GitHub repo](https://github.com/nginxinc/nginx-ns1-gslb/blob/master/configs/README.md). + +5. Follow the instructions in the [GitHub repo](https://github.com/nginxinc/nginx-ns1-gslb#running-the-agent) to start the agent. + + +## Verifying that NS1 Redistributes Traffic + +In this section we describe how to verify that NS1 correctly redistributes traffic to an alternate PoP when the PoP nearest to the client is not operational (in the setup in this guide, each of the three NGINX Plus instances corresponds to a PoP). There are three ways to indicate to NS1 that a PoP is down: + +- [Change the status of the NGINX Plus instance](#verify-when-status-down) to Down in the NS1 ``A`` record +- [Take down the servers in the proxied upstream group](#verify-when-upstream-down) +- [Cause traffic to exceed a configured threshold](#verify-when-over-threshold) + + +### Verifying Traffic Redistribution when an NGINX Plus Instance Is Marked Down + +Here we verify that NS1 switches over to the next‑nearest NGINX Plus instance when we change the metadata on the nearest NGINX Plus instance to Down. + +1. On a host located in the US, run the following command to determine which site NS1 is returning as nearest. Appropriately, it's returning 10.10.10.1, the IP address of the NGINX Plus instance in the US. + + ```shell + $ nslookup nginxgslb.cf + + Server: 10.10.100.102 + Address: 10.10.100.102#53 + + Non-authoritative answer: + Name: nginxgslb.cf + Address: 10.10.10.1 + ``` + +2. Change the **Up/Down** answer metadata on the US instance to Down (see Step 8 in [Setting Up NS1](#ns1-setup)). + +3. Wait an hour – because we didn't change the default time-to-live (TTL) of 3600 seconds on the ``A`` record for **nginxgslb.cf** – and issue the ``nslookup`` command again. NS1 returns 10.10.10.2, the IP address of the NGINX Plus instance in Germany, which is now the nearest. 
+ + ```shell + $ nslookup nginxgslb.cf + + Server: 10.10.100.102 + Address: 10.10.100.102#53 + + Non-authoritative answer: + Name: nginxgslb.cf + Address: 10.10.10.2 + ``` + + +### Verifying Traffic Redistribution When an Upstream Group Is Down + +With our NGINX Plus instances (answers) connected to NS1 data feeds (see Step 2 in [Installing the NS1 Agent](#agent-install)), we can verify that NS1 redistributes traffic correctly when it receives data from the agent indicating that an upstream group is down. In the following example, it's the **my_backend** group that goes down, as reported in the us-nginxgslb-datafeed feed. + +We run the following commands on a host located in the US. + +1. Query the NGINX Plus API to verify that the current status is ``up`` for the **my_backend** upstream group being proxied by the NGINX Plus instance in the US: + + ```shell + $ curl -X GET "127.0.0.1:8000/api//http/upstreams/my_backend/" -H "accept: application/json" | python -m json.tool | grep state + + "state": "up", + ``` + +2. Query the NS1 API to verify that NS1 also sees the US **my_backend** upstream group as ``up``. (For details about this API call, see the [NS1 documentation](https://ns1.com/api#getget-active-data-feeds-for-a-source). If the page doesn't scroll automatically to the relevant section, search for "Get active data feeds for a source".) + + On the command line, ```` and ```` are the same values we included in the YAML file in Step 4 of [Installing the NS1 Agent](#agent-install). + + The output includes a ``destinations`` entry for each data feed, so we search for the one where the ``label`` field says ``us-nginxgslb-datafeed``, and verify that the ``up`` field in that entry says ``true``. + + ```shell + $ curl -X GET -H 'X-NSONE-Key: ' https://api.nsone.net/v1/data/feeds/ | python -m json.tool + [ + ... 
+ { + "destinations": [ + { + "destid": "", + "desttype": "answer", + "record": "" + } + ], + "id": "", + "data": { + "up": "true" + }, + "config": { + "label": "us-nginxgslb-datafeed" + }, + "name": "us-nginxgslb-datafeed" + }, + ... + ] + ``` + +3. Determine which site NS1 is returning for hosts in the US. Appropriately, it's 10.10.10.1, the IP address of the US‑based NGINX Plus instance. + + ```shell + $ nslookup nginxgslb.cf + + Server: 10.10.100.102 + Address: 10.10.100.102#53 + + Non-authoritative answer: + Name: nginxgslb.cf + Address: 10.10.10.1 + ``` + +4. Take down the servers in the **my_backend** upstream group. There are several ways to do this: turn off the actual app on each server; or on the NGINX Plus instance either change the app's port number in the NGINX Plus configuration file to the wrong value, or use the [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server) directive or the [NGINX Plus API](https://docs.nginx.com/nginx/admin-guide/load-balancer/dynamic-configuration-api/#using-the-api-for-dynamic-configuration) to set each server's state to ``down``. + +5. Repeat Step 1. The NGINX Plus API now reports the status as ``unhealthy``. + + ``` none + $ curl -X GET "127.0.0.1:8000/api//http/upstreams/my_backend/" -H "accept: application/json" | python -m json.tool | grep state + + "state": "unhealthy", + ``` + +6. Repeat Step 2. The NS1 API now returns ``false`` in the ``up`` field. + + ```shell + $ curl -X GET -H 'X-NSONE-Key: ' https://api.nsone.net/v1/data/feeds/ | python -m json.tool + [ + ... + { + "destinations": [ + { + "destid": "", + "desttype": "answer", + "record": "" + } + ], + "id": "", + "data": { + "up": "false" + }, + "config": { + "label": "us-nginxgslb-datafeed" + }, + "name": "us-nginxgslb-datafeed" + }, + ... + ] + ``` + +7. Wait an hour – because we didn't change the default TTL of 3600 seconds on the ``A`` record for **nginxgslb.cf** – and repeat Step 3. 
NS1 returns 10.10.10.2, the IP address of the NGINX Plus instance in Germany, which is now the nearest to US‑based hosts. + + ```shell + $ nslookup nginxgslb.cf + + Server: 10.10.100.102 + Address: 10.10.100.102#53 + + Non-authoritative answer: + Name: nginxgslb.cf + Address: 10.10.10.2 + ``` + + +### Verifying Traffic Redistribution When a Threshold Is Exceeded + +You can configure NS1 to redistribute traffic away from a given NGINX Plus instance when a load metric for the instance exceeds one or more thresholds that you set. The thresholds are set in an NS1 _shed filter_, so‑called because NS1 describes the shifting of traffic to a different IP address as "shedding load" from the current IP address. + +Here we verify that NS1 redistributes traffic correctly when the number of active connections on an instance exceeds the threshold we set. +#### Creating the Shed Filter + +First we perform these steps to create the shed filter: + +1. Navigate to the details page of the ``A`` record for **nginxgslb.cf** under the  ZONES  tab, if it is not already open. Click the  Edit Filter Chain  button. + + Screenshot of NS1 GUI: clicking Edit Filter Chain button + +2. In the **Add Filters** window that opens, click the plus sign (+) on the box labeled **Shed Load** in the  HEALTHCHECKS  section. + + Screenshot of NS1 GUI: clicking Shed Load button on Add Filters page + +3. The **Shed Load** filter is added as the fourth (lowest) box in the **Active Filters** section. Move it to be third by clicking and dragging it above the Select First N box. + +4. Click the  Save Filter Chain  button. + +5. Back on the ``A`` record's details page, in the **Filter Chain** column click the **Shed Load** box, which expands to display an explanation of how the filter works. Click the label on the white box at the bottom of the explanation and select **Active connections** from the drop‑down menu. + + Screenshot of NS1 GUI: selecting Active connections for shed filter + +6. 
In the **Ungrouped Answers** section, click the stacked dots icon at the right end of the field for the US‑based NGINX Plus instance (10.10.10.1) and select Edit Answer Metadata. + + Screenshot of NS1 GUI: clicking Edit Answer Metadata button for shed filter + +7. In the **Answer Metadata** window that opens, set values for the following metadata. In each case, click the icon in the  FEED  column of the metadata's row, then select or enter the indicated value in the  AVAILABLE  column. (For testing purposes, we're setting very small values for the watermarks so that the threshold is exceeded very quickly.) + + - **Active connections** – us-nginxgslb-datafeed + - **High watermark** – 5 + - **Low watermark** – 2 + + After setting all three, click the  Ok  button. (The screenshot shows the window just before this action.) + + Screenshot of NS1 GUI: Answer Metadata page for shed filter + +#### Testing the Threshold + +With the shed filter in place, we're ready to verify that NS1 shifts traffic to the next‑nearest NGINX Plus instance when the number of active connections on the nearest instance exceeds the high watermark (upper threshold) of 5. As noted in Step 7 just above, we've set a very small value so we can quickly see the effect when it's exceeded. With the low watermark setting of 2, NS1 will start shifting traffic probabilistically when there are three active connections and definitely when there are five or more connections. + +We have written a script that continuously simulates more than four simultaneous connections. We have also configured the backend app to perform a sleep, so that the connections stay open long enough for the agent to report the number of active connections to NS1 before they close. + +We run the following commands on a host located in the US. + +1. 
Query the NGINX Plus API for the number of active connections: + + ```shell + $ curl -X GET "127.0.0.1:8000/api//connections" -H "accept: application/json" | python -m json.tool | grep active + + "active": 1, + ``` + +2. Query the NS1 API to learn the number of active connections the NS1 agent has reported to NS1. (For details about this API call, see the [NS1 documentation](https://ns1.com/api#getget-data-feed-details). If the page doesn't scroll automatically to the relevant section, search for "Get data feed details".) + + On the command line: + + - ```` and ```` are the same values we included in the YAML file in Step 4 of [Installing the NS1 Agent](#agent-install) and used in Step 2 of [Verifying Traffic Redistribution When an Upstream Group Is Down](#verify-when-upstream-down). + + - ```` is the ID assigned by NS1 to the **us-nginxgslb-datafeed** data feed. It was reported as ```` in the ``id`` field of the output in Step 2 in [Verifying Traffic Redistribution When an Upstream Group Is Down](#verify-when-upstream-down). (It also appears in that field in the following output.) + + The relevant field in the output is ``connections`` in the ``data`` section, and in this example it indicates there is one active connection. + + ```shell + $ curl -X GET -H 'X-NSONE-Key: ' https://api.nsone.net/v1/data/feeds// | python -m json.tool + + { + "config": { + "label": "us-nginxgslb-datafeed" + }, + "data": { + "connections": 1, + "up": true + }, + "destinations": [ + { + "destid": "", + "desttype": "answer", + "record": "" + } + ], + "id": "", + "name": "us-nginxgslb-datafeed", + "networks": [ + 0 + ] + } + ``` + +3. Determine which site NS1 is returning for hosts in the US. Appropriately, it's 10.10.10.1, the IP address of the US‑based NGINX Plus instance. + + ```shell + $ nslookup nginxgslb.cf + + Server: 10.10.100.102 + Address: 10.10.100.102#53 + + Non-authoritative answer: + Name: nginxgslb.cf + Address: 10.10.10.1 + ``` + +4. 
Create five or more connections to the NGINX Plus instance. We do this by running the script mentioned in the introduction to this section. + +5. Repeat Step 1. The NGINX Plus API now reports five active connections. + + ```shell + $ curl -X GET "127.0.0.1:8000/api//connections" -H "accept: application/json" | python -m json.tool | grep active + + "active": 5, + ``` + +6. Repeat Step 2. The NS1 API also reports five active connections. + + ```shell + $ curl -X GET -H 'X-NSONE-Key: ' https://api.nsone.net/v1/data/feeds// | python -m json.tool + + { + "config": { + "label": "us-nginxgslb-datafeed" + }, + "data": { + "connections": 5, + "up": true + }, + ... + } + ``` + +7. Wait an hour – because we didn't change the default time-to-live (TTL) of 3600 seconds on the ``A`` record for **nginxgslb.cf** – and repeat Step 3. NS1 returns 10.10.10.2, the IP address of the NGINX Plus instance in Germany, which is the nearest now that the instance in the US has too many active connections. + + ```shell + $ nslookup nginxgslb.cf + + Server: 10.10.100.102 + Address: 10.10.100.102#53 + + Non-authoritative answer: + Name: nginxgslb.cf + Address: 10.10.10.2 + ``` + +### Revision History + +- Version 1 (September 2019) – Initial version (NGINX Plus Release 19) + diff --git a/content/nginx/deployment-guides/google-cloud-platform/_index.md b/content/nginx/deployment-guides/google-cloud-platform/_index.md new file mode 100644 index 000000000..c2b64171f --- /dev/null +++ b/content/nginx/deployment-guides/google-cloud-platform/_index.md @@ -0,0 +1,8 @@ +--- +description: Deployment guides for F5 NGINX Plus on the Google Cloud Platform. 
+menu: + docs: + parent: NGINX Plus +title: Google Cloud Platform +weight: 100 +--- diff --git a/content/nginx/deployment-guides/google-cloud-platform/high-availability-all-active.md b/content/nginx/deployment-guides/google-cloud-platform/high-availability-all-active.md new file mode 100644 index 000000000..fa9afcd78 --- /dev/null +++ b/content/nginx/deployment-guides/google-cloud-platform/high-availability-all-active.md @@ -0,0 +1,987 @@ +--- +description: Configure highly available F5 NGINX Plus load balancing of application instances, + in an all-active deployment on the Google Cloud Platform. +docs: DOCS-450 +doctypes: +- task +title: All-Active HA for NGINX Plus on the Google Cloud Platform +toc: true +weight: 100 +--- + +This deployment guide explains how to create and configure the initial components for an all‑active, high‑availability deployment of F5 NGINX Plus on Google Compute Engine (GCE), the Google Cloud Platform (GCP) product for running workloads on virtual machines (VMs). Multiple instances of NGINX Plus in active pairs load balance incoming connections across multiple application environments. + + +**Notes:** + +- GCE is a highly dynamic environment where the names and arrangement of GUI elements (such as menu items, checkboxes, and configuration navigation) are subject to change. We have made every effort to accurately represent the GCE GUI at the time of original publication, but options and locations might change over time. Use this guide as a reference and adapt to the current GCE working environment as necessary. +- The configuration described in this guide allows anyone from any public IP address to access the NGINX Plus instances directly without restriction. While this most flexibly accommodates common scenarios in a test environment, we do not recommend it for production use. 
Before deploying the architecture in production, we strongly recommend that you disable HTTP and HTTPS access to the app-1 and app-2 instances over the GCE external IP addresses, or remove the external IP addresses for all application instances so that they're accessible only on the internal GCE network. + + +## Design and Topology + +The deployment combines the following technologies: + +- [NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) – Load balances HTTP connections across multiple instances of two different applications. Instructions are provided both for manual installation on a standard GCE VM image and for setting up the prebuilt NGINX Plus VM image available in the Google Marketplace. +- PHP-FPM – Supports the two sample applications. +- [GCE network load balancer](https://cloud.google.com/compute/docs/load-balancing/network) – Provides TCP connectivity between clients and the NGINX Plus load‑balancing (LB) instances in a GCP region, as well as maintaining session persistence for each NGINX Plus instance. +- [GCE instance groups](https://cloud.google.com/compute/docs/instance-groups) – Provide a mechanism for managing a group of VM instances as a unit. +- [GCE health checks](https://cloud.google.com/compute/docs/load-balancing/health-checks) – Maintain high availability of the NGINX Plus LB instances by controlling when GCE creates a new LB instance in the instance group. + +Topology of the all‑active deployment of NGINX Plus as the Google Cloud Platform load balancer. + +[Session persistence](https://www.nginx.com/products/nginx/load-balancing/#session-persistence) is managed at the network layer by GCE network load balancer (based on client IP address) and at the application layer by the NGINX Plus LB instance (via a session cookie). 
When a new client connection enters the GCE network environment, GCE network load balancer assigns it to a specific frontend NGINX Plus LB instance, and the association persists as long as the LB instance is up and functional. The NGINX Plus LB instance forwards the request to a specific application instance in one of the two groups of them, selected using its default Round Robin algorithm. It also issues a cookie to the client to represent the session with that application instance, so that subsequent requests from the client are forwarded to that application instance as long as it is up and running. + +This deployment in this guide utilizes two groups of application instances – app-1 and app-2 – to demonstrate [load balancing](https://www.nginx.com/products/nginx/load-balancing/) between different application types, but the application configurations are the same in both groups. The deployment can be very easily adapted to distribute unique connections to different groups of application instances by creating discrete upstream blocks and doing content routing based on URI. Please refer to the reference documentation for details on configuring multiple [upstream server groups](https://nginx.org/en/docs/http/ngx_http_upstream_module.html). + + +## Prerequisites + +This guide assumes that you: + +- Have a [Google account](https://accounts.google.com/SignUp) (a separate GCP or GCE account is not needed). +- Have enrolled in a [free trial](https://cloud.google.com/free-trial) with available credit or have an established payment account with GCP. +- Have a basic working knowledge of GCE and its GUI control panel: + - Navigation + - Creating instances + - Managing IAM policies +- Understand basic networking. +- Have an NGINX Plus subscription. You can start a [free 30‑day trial](https://www.nginx.com/free-trial-request/) if you don't already have a paid subscription. 
+- Know how to install NGINX Plus, have a basic understanding of how it performs in load balancing and application delivery modes, and are familiar with its configuration syntax. +- Are familiar with GitHub and know how to [clone a repository](https://help.github.com/en/articles/cloning-a-repository/). + +All component names – projects, instances, templates, instance groups, and so on – are examples only. You can change them as suits your needs. + + +## Task 1: Creating a Project and Firewall Rules + +Create a new GCE project to host the all‑active NGINX Plus deployment. + +1. Log into the [GCP Console](http://console.cloud.google.com) at console.cloud.google.com. + +2. The GCP Home > Dashboard tab opens. Its contents depend on whether you have any existing projects. + + - If there are no existing projects, click the  Create a project  button. + + Screenshot of the Google Cloud Platform dashboard that appears when there are no existing projects (creating a project is the first step in configuring NGINX Plus as the Google Cloud load balancer) + + - If there are existing projects, the name of one of them appears in the upper left of the blue header bar (in the screenshot, it's  My Test Project ). Click the project name and select Create project from the menu that opens. + + Screenshot of the Google Cloud Platform page that appears when other projects already exist (creating a project is the first step in configuring NGINX Plus as the Google Cloud load balancer) + +3. Type your project name in the New Project window that pops up, then click CREATE. We're naming the project NGINX Plus All-Active-LB. + + Screenshot of the New Project pop-up window for naming a new project on the Google Cloud Platform, which is the first step in configuring NGINX Plus as the Google load balancer + +### Creating Firewall Rules + +Create firewall rules that allow access to the HTTP and HTTPS ports on your GCE instances. 
You'll attach the rules to all the instances you create for the deployment. + +1. Navigate to the Networking > Firewall rules tab and click  +  CREATE FIREWALL RULE. (The screenshot shows the default rules provided by GCE.) + + Screenshot of the Google Cloud Platform page for defining new firewall rules; when configuring NGINX Plus as the Google Cloud load balancer, we open ports 80, 443, and 8080 for it. + +2. Fill in the fields on the Create a firewall rule screen that opens: + + - **Name** – nginx-plus-http-fw-rule + - **Description** – Allow access to ports 80, 8080, and 443 on all NGINX Plus instances + - Source filter – On the drop-down menu, select either Allow from any source (0.0.0.0/0), or IP range if you want to restrict access to users on your private network. In the second case, fill in the Source IP ranges field that opens. In the screenshot, we are allowing unrestricted access. + - Allowed protocols and ports – tcp:80; tcp:8080; tcp:443 + + **Note:** As mentioned in the introduction to this guide, opening these ports for your application instances is appropriate only in a test environment. We strongly recommend that before deploying the architecture in production you create a new firewall rule for your application instances that blocks all port access to the external IP address, or disable external IP addresses for the instances to make them accessible only on the internal GCE network. + + - Target tags – nginx-plus-http-fw-rule + + Screenshot of the interface for creating a Google Compute Engine (GCE) firewall rule, used during deployment of NGINX Plus as the Google load balancer. + +3. Click the  Create  button. The new rule is added to the table on the Firewall rules tab. + + +## Task 2: Creating Source Instances + +Create three GCE source instances that will serve as templates for the instance groups you will create later on: one instance for the NGINX Plus load balancer and two instances for NGINX Plus PHP application servers. 
+ +You can create source instances in either of two ways: + +- [Based on a standard GCE VM image](#source-vm), in which case you install NGINX Plus manually. This guide uses the Ubuntu LTS image that was most current at the time of publication (Ubuntu 16.04 LTS), but you can use any Unix or Linux OS that [NGINX Plus supports]({{< relref "../../technical-specs.md" >}}). +- [Based on the prebuilt NGINX Plus image](#source-prebuilt) in the Google Marketplace, which at the time of publication runs on Ubuntu 14.04 LTS. + +The instructions for the two methods are significantly different, but after you create the source instances all subsequent instructions are the same. + + +### Creating Source Instances from VM Images + +Create three source VM instances based on a GCE VM image. We're basing our instances on the Ubuntu 16.04 LTS image. + +1. Verify that the NGINX Plus All-Active-LB project is still selected in the Google Cloud Platform header bar. + +2. Navigate to the Compute Engine > VM instances tab. + +3. Click the  Create instance  button. The Create an instance page opens. + + +#### Creating the First Application Instance from a VM Image + +1. On the Create an instance page, modify or verify the fields and checkboxes as indicated (a screenshot of the completed page appears in the next step): + + - **Name** – nginx-plus-app-1 + - **Zone** – The GCP zone that makes sense for your location. We're using us-west1-a. + - Machine type – The appropriate size for the level of traffic you anticipate. We're selecting micro, which is ideal for testing purposes. + - Boot disk – Click Change. The Boot disk page opens to the OS images subtab. Perform the following steps: + + - Click the radio button for the Unix or Linux image of your choice (here, Ubuntu 16.04 LTS). + - Accept the default values in the Boot disk type and Size (GB) fields (Standard persistent disk and 10 respectively). + - Click the  Select  button. 
+ + Screenshot of the 'Boot disk' page in Google Cloud Platform for selecting the OS on which a VM runs. In the deployment of NGINX Plus as the Google load balancer, we select Ubuntu 16.04 LTS. + + - Identity and API access – Unless you want more granular control over access, keep the defaults for the Service account field (Compute Engine default service account) and Access scopes radio button (Allow default access). + - **Firewall** – Verify that neither check box is checked (the default). The firewall rule invoked in the **Tags** field on the Management subtab (see Step 3 below) controls this type of access. + +2. Click Management, disk, networking, SSH keys to open that set of subtabs. (The screenshot shows the values entered in the previous step.) + + Screen shot of the 'Create an instance' page for an application server in the deployment of NGINX Plus as the Google Cloud Platform load balancer. + +3. On the Management subtab, modify or verify the fields as indicated: + + - **Description** – NGINX Plus app-1 Image + - **Tags** – nginx-plus-http-fw-rule + - **Preemptibility** – Off (recommended) (the default) + - Automatic restart – On (recommended) (the default) + - On host maintenance – Migrate VM instance (recommended) (the default) + + Screenshot of the Management subtab used during creation of a new VM instance, part of deploying NGINX Plus as the Google load balancer. + +4. On the Disks subtab, uncheck the checkbox labeled Delete boot disk when instance is deleted. + + Screenshot of the Disks subtab used during creation of a new VM instance, part of deploying NGINX Plus as the Google Cloud load balancer. + +5. On the Networking subtab, verify the default settings, in particular Ephemeral for External IP and Off for IP Forwarding. + + Screenshot of the Networking subtab used during creation of a new VM instance, part of deploying NGINX Plus as the Google Cloud load balancer. + +6. 
If you are using your own SSH public key instead of the default keys associated with your GCE identity, on the SSH Keys subtab paste the hexadecimal key string into the box that reads Enter entire key data. + + Screenshot of the SSH Keys subtab used during creation of a new VM instance, part of deploying NGINX Plus as the Google Cloud Platform load balancer. + +7. Click the  Create  button at the bottom of the Create an instance page. + + The VM instances summary page opens. It can take several minutes for the instance to be created. Wait to continue until the green check mark appears. + + Screenshot of the summary page that verifies the creation of a new VM instance, part of deploying NGINX Plus as the load balancer for Google Cloud. + + +#### Creating the Second Application Instance from a VM Image + +1. On the VM instances summary page, click CREATE INSTANCE. + +2. Repeat the steps in Creating the First Application Instance to create the second application instance, specifying the same values as for the first application instance, except: + + - In Step 1, **Name** – nginx-plus-app-2 + - In Step 3, **Description** – NGINX Plus app-2 Image + + +#### Creating the Load-Balancing Instance from a VM Image + +1. On the VM instances summary page, click CREATE INSTANCE. + +2. Repeat the steps in Creating the First Application Instance to create the load‑balancing instance, specifying the same values as for the first application instance, except: + + - In Step 1, **Name** – nginx-plus-lb + - In Step 3, **Description** – NGINX Plus Load Balancing Image + + +#### Configuring PHP and FastCGI on the VM-Based Instances + +Install and configure PHP and FastCGI on the instances. + +Repeat these instructions for all three source instances (nginx-plus-app-1, nginx-plus-app-2, and nginx-plus-lb). + +**Note:** Some commands require `root` privilege. If appropriate for your environment, prefix commands with the `sudo` command. + +1. 
Connect to the instance over SSH using the method of your choice. GCE provides a built-in mechanism: + + - Navigate to the Compute Engine > VM instances tab. + - In the instance's row in the table, click the triangle icon in the Connect column at the far right and select a method (for example, Open in browser window). + + Screenshot showing how to connect via SSH to a VM instance, part of deploying NGINX Plus as the Google load balancer. + +2. Working in the SSH terminal, install PHP 7 (the default PHP version for Ubuntu 16.04 LTS) and FastCGI. + + ```shell + apt-get install php7.0-fpm + ``` + +3. Edit the PHP 7 configuration to bind to a local network port instead of a Unix socket. Using your preferred text editor, remove the following line from /etc/php/7.0/fpm/pool.d: + + ```none + listen = /run/php/php7.0-fpm.sock + ``` + + and replace it with these two lines: + + ```none + listen = 127.0.0.1:9000 + listen.allowed_clients = 127.0.0.1 + ``` + +4. Restart PHP: + + ```shell + service php7.0-fpm restart + ``` + +5. Leave the SSH connection open for reuse in the next section. + + +#### Installing and Configuring NGINX Plus on the VM-Based Instances + +Now install NGINX Plus and download files that are specific to the all‑active deployment: + +- An NGINX Plus configuration file customized for the function performed by the instance (application server or load balancer) +- A set of content files (HTML, images, and so on) served by the application servers in the deployment + +Both the configuration and content files are available at the [NGINX GitHub repository](https://github.com/nginxinc/NGINX-Demos/tree/master/gce-nginx-plus-deployment-guide-files). + +Repeat these instructions for all three source instances (nginx-plus-app-1, nginx-plus-app-2, and nginx-plus-lb). + +**Note:** Some commands require `root` privilege. If appropriate for your environment, prefix commands with the `sudo` command. + +1. Install NGINX Plus. 
For instructions, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/installing-nginx/installing-nginx-plus.md" >}}). + +2. Clone the GitHub repository for the [all‑active load balancing deployment](https://github.com/nginxinc/NGINX-Demos/tree/master/gce-nginx-plus-deployment-guide-files). (Instructions for downloading the files directly from the GitHub repository are provided below, in case you prefer not to clone it.) + +3. Copy the contents of the usr\_share\_nginx subdirectory from the cloned repository to the local /usr/share/nginx directory, creating the local directory if necessary. (If you choose not to clone the repository, you need to download each file from the GitHub repository individually.) + +4. Copy the appropriate configuration file from the etc\_nginx\_conf.d subdirectory of the cloned repository to /etc/nginx/conf.d: + + - On both nginx-plus-app-1 and nginx-plus-app-2, copy gce-all-active-app.conf. + + You can also run the following commands to download the configuration file directly from the GitHub repository: + + ```none + cd /etc/nginx/conf.d/ + curl -o gce-all-active-app.conf https://github.com/nginxinc/NGINX-Demos/blob/master/gce-nginx-plus-deployment-guide-files/etc_nginx_conf.d/gce-all-active-app.conf + ``` + + or + + ```none + cd /etc/nginx/conf.d/ + wget https://github.com/nginxinc/NGINX-Demos/blob/master/gce-nginx-plus-deployment-guide-files/etc_nginx_conf.d/gce-all-active-app.conf + ``` + + - On nginx-plus-lb, copy gce-all-active-lb.conf. 
+ + You can also run the following commands to download the configuration file directly from the GitHub repository: + + ```none + $ cd /etc/nginx/conf.d/ + $ curl -o gce-all-active-lb.conf https://github.com/nginxinc/NGINX-Demos/blob/master/gce-nginx-plus-deployment-guide-files/etc_nginx_conf.d/gce-all-active-lb.conf + ``` + + or + + ```none + cd /etc/nginx/conf.d/ + wget https://github.com/nginxinc/NGINX-Demos/blob/master/gce-nginx-plus-deployment-guide-files/etc_nginx_conf.d/gce-all-active-lb.conf + ``` + +5. On the LB instance (nginx-plus-lb), use your preferred text editor to open gce-all-active-lb.conf and change the `server` directives in the `upstream` block to reference the internal IP addresses of the nginx-plus-app-1 and nginx-plus-app-2 instances (substitute the address for the expression in angle brackets). No action is required on the two application instances themselves. + + You can look up internal IP addresses in the Internal IP column of the table on the Compute Engine > VM instances summary page. + + ```nginx + upstream upstream_app_pool { + server ; + server ; + zone upstream-apps 64k; + sticky cookie GCPPersist expires=300; + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [`sticky cookie`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +6. Rename **default.conf** to **default.conf.bak** so that NGINX Plus does not load it. The configuration files provided for the all‑active deployment include equivalent instructions plus additional function‑specific directives. + + ```shell + mv default.conf default.conf.bak + ``` + +7. 
Enable the NGINX Plus [live activity monitoring](https://www.nginx.com/products/nginx/live-activity-monitoring/) dashboard for the instance by copying status.conf from the etc\_nginx\_conf.d subdirectory of the cloned repository to /etc/nginx/conf.d.
Verify that the NGINX Plus All-Active-LB project is still selected in the Google Cloud Platform header bar. + +2. Navigate to the GCP Marketplace and search for nginx plus. + +3. Click the NGINX Plus box in the results area. + + Screenshot of NGINX Plus in the Google Cloud Platform Marketplace; from here, you can create a prebuilt NGINX Plus VM instance when deploying NGINX Plus as the load balancer for Google Cloud. + +4. On the NGINX Plus page that opens, click the  Launch on Compute Engine  button. + +5. Fill in the fields on the New NGINX Plus deployment page as indicated. + + - Deployment name – nginx-plus-app-1 + - **Zone** – The GCP zone that makes sense for your location. We're using us-west1-a. + - Machine type – The appropriate size for the level of traffic you anticipate. We're selecting micro, which is ideal for testing purposes. + - Disk type – Standard Persistent Disk (the default) + - Disk size in GB – 10 (the default and minimum allowed) + - Network name – default + - Subnetwork name – default + - **Firewall** – Verify that the Allow HTTP traffic checkbox is checked. + + Screenshot of the page for creating a prebuilt NGINX Plus VM instance when deploying NGINX Plus as the Google Cloud Platform load balancer. + +6. Click the  Deploy  button. + + It can take several minutes for the instance to deploy. Wait until the green check mark and confirmation message appear before continuing. + + Screenshot of the page that confirms the creation of a prebuilt NGINX Plus VM instance when deploying NGINX Plus as the Google load balancer. + +7. Navigate to the Compute Engine > VM instances tab and click nginx-plus-app-1-vm in the Name column in the table. (The -vm suffix is added automatically to the name of the newly created instance.) + + Screenshot showing how to access the page where configuration details for a VM instance can be modified during deployment of NGINX Plus as the Google Cloud load balancer. + +8. 
On the VM instances page that opens, click EDIT at the top of the page. In fields that can be edited, the value changes from static text to text boxes, drop‑down menus, and checkboxes. + +9. Modify or verify the indicated editable fields (non‑editable fields are not listed): + + - **Tags** – If a default tag appears in the field (for example, nginx-plus-app-1-tcp-80), click the X after its name to remove it, and type in nginx-plus-http-fw-rule. + - External IP – Ephemeral (the default) + - Boot disk and local disks – Uncheck the checkbox labeled Delete boot disk when when instance is deleted. + - Additional disks – No changes + - **Network** – We recommend keeping the default settings, but if you need to change them (if, for example, you're configuring a production environment), click default and then EDIT on the Network details page that opens. After making your changes click the  Save  button. + - **Firewall** – Verify that neither check box is checked (the default). The firewall rule named in the **Tags** field that's above on the current page (see the first bullet in this list) controls this type of access. + - Automatic restart – On (recommended) (the default) + - On host maintenance – Migrate VM instance (recommended) (the default) + - Custom metadata – No changes + - SSH Keys – If you are using your own SSH public key instead of the default keys associated with your GCE identity, paste the hexadecimal key string into the box that reads Enter entire key data. + - Serial port – Verify that the check box labeled Enable connecting to serial ports is not checked (the default). + + The screenshot shows the results of your changes, omitting some fields than cannot be edited or for which we recommend retaining the defaults. + + Screenshot showing the configuration modifications for a VM instance being deployed as part of setting up NGINX Plus as the Google load balancer. + +10. Click the  Save  button. 
+ + +#### Creating the Second Application Instance from a Prebuilt Image + +Create the second application instance by cloning the first one. + +1. Navigate back to the summary page on the Compute Engine > VM instances tab (click the arrow that is circled in the following figure). + + Screenshot showing how to return to the VM instance summary page during deployment of NGINX Plus as the Google Cloud Platform load balancer. + +2. Click nginx-plus-app-1-vm in the Name column of the table (shown in the screenshot in Step 7 of Creating the First Application Instance). + +3. On the VM instances page that opens, click CLONE at the top of the page. + +4. On the Create an instance page that opens, modify or verify the fields and checkboxes as indicated: + + - **Name** – nginx-plus-app-2-vm. Here we're adding the -vm suffix to make the name consistent with the first instance; GCE does not add it automatically when you clone an instance. + - **Zone** – The GCP zone that makes sense for your location. We're using us-west1-a. + - Machine type – The appropriate size for the level of traffic you anticipate. We're selecting f1-micro, which is ideal for testing purposes. + - Boot disk type – New 10 GB standard persistent disk (the value inherited from nginx-plus-app-1-vm) + - Identity and API access – Set the Access scopes radio button to Allow default access and accept the default values in all other fields. If you want more granular control over access than is provided by these settings, modify the fields in this section as appropriate. + - **Firewall** – Verify that neither check box is checked (the default). + +6. Click Management, disk, networking, SSH keys to open that set of subtabs. + +7. Verify the following settings on the subtabs, modifying them as necessary: + + - Management – In the **Tags** field: nginx-plus-http-fw-rule + - Disks – The Deletion rule checkbox (labeled Delete boot disk when instance is deleted) is not checked + +9. Click the  Create  button. 
+ + +#### Creating the Load-Balancing Instance from a Prebuilt Image + +Create the source load‑balancing instance by cloning the first instance again. + +Repeat Steps 2 through 7 of Creating the Second Application Instance. In Step 4, specify nginx-plus-lb-vm as the name. + + +#### Configuring PHP and FastCGI on the Prebuilt-Based Instances + +Install and configure PHP and FastCGI on the instances. + +Repeat these instructions for all three source instances (nginx-plus-app-1-vm, nginx-plus-app-2-vm, and nginx-plus-lb-vm). + +**Note:** Some commands require `root` privilege. If appropriate for your environment, prefix commands with the `sudo` command. + +1. Connect to the instance over SSH using the method of your choice. GCE provides a built‑in mechanism: + + - Navigate to the Compute Engine > VM instances tab. + - In the row for the instance in the table, click the triangle icon in the Connect column at the far right and select a method (for example, Open in browser window). + + The screenshot shows instances based on the prebuilt NGINX Plus images. + + Screenshot showing how to connect via SSH to a VM instance, part of deploying NGINX Plus as the Google load balancer. + +2. Working in the SSH terminal, install PHP 5 (the default PHP version for Ubuntu 14.04 LTS) and FastCGI. + + ```shell + apt-get install php5-fpm + ``` + +3. Edit the PHP 5 configuration to bind to a local network port instead of a Unix socket. Using your preferred text editor, remove the following line from /etc/php5/fpm/pool.d: + + ```none + Listen = /run/php/php5-fpm.sock + ``` + + and replace it with these two lines: + + ```none + Listen = 127.0.0.1:9000 + Listen.allowed_clients = 127.0.0.1 + ``` + +4. Restart PHP: + + ```shell + service php5-fpm restart + ``` + +5. Leave the SSH connection open for reuse in the next section. 
+ + +#### Configuring NGINX Plus on the Prebuilt-Based Instances + +Now download files that are specific to the all‑active deployment: + +- An NGINX Plus configuration file customized for the function the instance performs (application server or load balancer) +- A set of content files (HTML, images, and so on) served by the application servers in the deployment + +Both the configuration and content files are available at the [NGINX GitHub repository](https://github.com/nginxinc/NGINX-Demos/tree/master/gce-nginx-plus-deployment-guide-files). + +Repeat these instructions for all three source instances (nginx-plus-app-1-vm, nginx-plus-app-2-vm, and nginx-plus-lb-vm). + +**Note:** Some commands require `root` privilege. If appropriate for your environment, prefix commands with the `sudo` command. + +1. Clone the GitHub repository for the [all‑active load balancing deployment](https://github.com/nginxinc/NGINX-Demos/tree/master/gce-nginx-plus-deployment-guide-files). (Instructions for downloading the files directly from the GitHub repository are provided below, in case you prefer not to clone it.) + +2. Copy the contents of the usr\_share\_nginx subdirectory from the cloned repository to the local /usr/share/nginx directory, creating the local directory if necessary. (If you choose not to clone the repository, you need to download each file from the GitHub repository individually.) + +3. Copy the appropriate configuration file from the etc\_nginx\_conf.d subdirectory of the cloned repository to /etc/nginx/conf.d: + + - On both nginx-plus-app-1-vm and nginx-plus-app-2-vm, copy gce-all-active-app.conf. 
+
+      You can also run the following commands to download the configuration file directly from the GitHub repository:
+
+      ```shell
+      cd /etc/nginx/conf.d/
+      curl -o gce-all-active-app.conf https://raw.githubusercontent.com/nginxinc/NGINX-Demos/master/gce-nginx-plus-deployment-guide-files/etc_nginx_conf.d/gce-all-active-app.conf
+      ```
+
+      or
+
+      ```none
+      cd /etc/nginx/conf.d/
+      wget https://raw.githubusercontent.com/nginxinc/NGINX-Demos/master/gce-nginx-plus-deployment-guide-files/etc_nginx_conf.d/gce-all-active-app.conf
+      ```
+
+    - On nginx-plus-lb-vm, copy gce-all-active-lb.conf.
+
+      You can also run the following commands to download the configuration file directly from the GitHub repository:
+
+      ```none
+      cd /etc/nginx/conf.d/
+      curl -o gce-all-active-lb.conf https://raw.githubusercontent.com/nginxinc/NGINX-Demos/master/gce-nginx-plus-deployment-guide-files/etc_nginx_conf.d/gce-all-active-lb.conf
+      ```
+
+      or
+
+      ```none
+      cd /etc/nginx/conf.d/
+      wget https://raw.githubusercontent.com/nginxinc/NGINX-Demos/master/gce-nginx-plus-deployment-guide-files/etc_nginx_conf.d/gce-all-active-lb.conf
+      ```
+
+4. On the LB instance (nginx-plus-lb-vm), use your preferred text editor to open gce-all-active-lb.conf and change the `server` directives in the `upstream` block to reference the internal IP addresses of the nginx-plus-app-1-vm and nginx-plus-app-2-vm instances. (No action is required on the two application instances themselves.)
+
+    You can look up internal IP addresses in the Internal IP column of the table on the Compute Engine > VM instances summary page. 
+
+    ```nginx
+    upstream upstream_app_pool {
+        server <internal-IP-address-of-nginx-plus-app-1-vm>;
+        server <internal-IP-address-of-nginx-plus-app-2-vm>;
+        zone upstream-apps 64k;
+        sticky cookie GCPPersist expires=300;
+    }
+    ```
+
+    Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [`sticky cookie`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone)
+
+5. Rename **default.conf** to **default.conf.bak** so that NGINX Plus does not load it. The configuration files provided for the all‑active deployment include equivalent instructions plus additional function‑specific directives.
+
+    ```shell
+    mv default.conf default.conf.bak
+    ```
+
+6. Enable the NGINX Plus [live activity monitoring](https://www.nginx.com/products/nginx/live-activity-monitoring/) dashboard for the instance by copying status.conf from the etc\_nginx\_conf.d subdirectory of the cloned repository to /etc/nginx/conf.d.
+
+    You can also run the following commands to download the configuration file directly from the GitHub repository:
+
+    ```shell
+    cd /etc/nginx/conf.d/
+    curl -o status.conf https://raw.githubusercontent.com/nginxinc/NGINX-Demos/master/gce-nginx-plus-deployment-guide-files/etc_nginx_conf.d/status.conf
+    ```
+
+    or
+
+    ```shell
+    cd /etc/nginx/conf.d/
+    wget https://raw.githubusercontent.com/nginxinc/NGINX-Demos/master/gce-nginx-plus-deployment-guide-files/etc_nginx_conf.d/status.conf
+    ```
+
+7. Validate the NGINX Plus configuration and restart NGINX Plus:
+
+    ```shell
+    nginx -t
+    nginx -s reload
+    ```
+
+8. Verify the instance is working by accessing it at its external IP address. (As previously noted, we recommend blocking access to the external IP addresses of the application instances in a production environment.) The external IP address for the instance appears on the Compute Engine > VM instances summary page, in the External IP column of the table. 
+
+    - Access the index.html page either in a browser or by running this `curl` command.
+
+      ```shell
+      curl http://<external-IP-address-of-instance>
+      ```
+
+    - Access the NGINX Plus live activity monitoring dashboard in a browser, at:
+
+      https://_external-IP-address-of-NGINX-Plus-server_:8080/dashboard.html
+
+9. Proceed to [Task 3: Creating "Gold" Images](#gold).
+
+
+## Task 3: Creating "Gold" Images
+
+Create _gold images_, which are base images that GCE clones automatically when it needs to scale up the number of instances. They are derived from the instances you created in [Creating Source Instances](#source). Before creating the images, you must delete the source instances to break the attachment between them and the disk (you can't create an image from a disk that's attached to a VM instance).
+
+1. Verify that the NGINX Plus All-Active-LB project is still selected in the Google Cloud Platform header bar.
+
+2. Navigate to the Compute Engine > VM instances tab.
+
+3. In the table, select all three instances:
+
+    - If you created source instances from [VM (Ubuntu) images](#source-vm): nginx-plus-app-1, nginx-plus-app-2, and nginx-plus-lb
+    - If you created source instances from [prebuilt NGINX Plus images](#source-prebuilt): nginx-plus-app-1-vm, nginx-plus-app-2-vm, and nginx-plus-lb-vm
+
+4. Click STOP in the top toolbar to stop the instances.
+
+    Screenshot of the toolbar on the Google Compute Engine page that lists VM instances, used when deploying NGINX Plus as the Google Cloud load balancer.
+
+5. Click DELETE in the top toolbar to delete the instances.
+
+    **Note:** If the pop‑up confirmation window warns that the boot disk for any instance will also be deleted, cancel the deletion, and perform these steps for each affected instance:
+
+    - Navigate to the Compute Engine > VM instances tab and click the instance in the Name column in the table. (The screenshot shows nginx-plus-app-1-vm.) 
+
+      Screenshot showing how to access the page where configuration details for a VM instance can be modified during deployment of NGINX Plus as the Google Cloud load balancer.
+
+    - On the VM instances page that opens, click EDIT at the top of the page. In fields that can be edited, the value changes from static text to text boxes, drop‑down menus, and checkboxes.
+    - In the Boot disk and local disks field, uncheck the checkbox labeled Delete boot disk when instance is deleted.
+    - Click the  Save  button.
+    - On the VM instances summary page, select the instance in the table and click DELETE in the top toolbar to delete it.
+
+6. Navigate to the Compute Engine > Images tab.
+
+7. Click [+] CREATE IMAGE.
+
+8. On the Create an image page that opens, modify or verify the fields as indicated:
+
+    - **Name** – nginx-plus-app-1-image
+    - **Family** – Leave the field empty
+    - **Description** – NGINX Plus Application 1 Gold Image
+    - **Encryption** – Automatic (recommended) (the default)
+    - **Source** – Disk (the default)
+    - Source disk – nginx-plus-app-1 or nginx-plus-app-1-vm, depending on the method you used to create source instances (select the source instance from the drop‑down menu)
+
+9. Click the  Create  button.
+
+10. Repeat Steps 7 through 9 to create a second image with the following values (retain the default values in all other fields):
+
+    - **Name** – nginx-plus-app-2-image
+    - **Description** – NGINX Plus Application 2 Gold Image
+    - Source disk – nginx-plus-app-2 or nginx-plus-app-2-vm, depending on the method you used to create source instances (select the source instance from the drop‑down menu)
+
+11. 
Repeat Steps 7 through 9 to create a third image with the following values (retain the default values in all other fields): + + - **Name** – nginx-plus-lb-image + - **Description** – NGINX Plus LB Gold Image + - Source disk – nginx-plus-lb or nginx-plus-lb-vm, depending on the method you used to create source instances (select the source instance from the drop‑down menu) + +12. Verify that the three images appear at the top of the table on the Compute Engine > Images tab. + + +## Task 4: Creating Instance Templates + +Create _instance templates_, which are the compute workloads that are created in instance groups, either manually or automatically when GCE detects a failure. + + +### Creating the First Application Instance Template + +1. Verify that the NGINX Plus All-Active-LB project is still selected in the Google Cloud Platform header bar. + +2. Navigate to the Compute Engine > Instance templates tab. + +3. Click the  Create instance template  button. + +4. On the Create an instance template page that opens, modify or verify the fields as indicated: + + - **Name** – nginx-plus-app-1-instance-template + - Machine type – The appropriate size for the level of traffic you anticipate. We're selecting micro, which is ideal for testing purposes. + - Boot disk – Click Change. The Boot disk page opens. Perform the following steps: + + - Open the Custom Images subtab. + + Screenshot of the 'Boot disk' page in Google Cloud Platform for selecting the source instance of a new instance template, part of deploying NGINX Plus as the Google load balancer. + + - Select NGINX Plus All-Active-LB from the drop-down menu labeled Show images from. + + - Click the nginx-plus-app-1-image radio button. + + - Accept the default values in the Boot disk type and Size (GB) fields (Standard persistent disk and 10 respectively). + + - Click the  Select  button. 
+ + - Identity and API access – Unless you want more granular control over access, keep the defaults in the Service account field (Compute Engine default service account) and Access scopes field (Allow default access). + - **Firewall** – Verify that neither check box is checked (the default). The firewall rule invoked in the **Tags** field on the Management subtab (see Step 6 below) controls this type of access. + +6. Click Management, disk, networking, SSH keys (indicated with a red arrow in the following screenshot) to open that set of subtabs. + + Screenshot of the interface for creating a Google Compute Engine (GCE) instance template, used during deployment of NGINX Plus as the Google load balancer. + +7. On the Management subtab, modify or verify the fields as indicated: + + - **Description** – NGINX Plus app-1 Instance Template + - **Tags** – nginx-plus-http-fw-rule + - **Preemptibility** – Off (recommended) (the default) + - Automatic restart – On (recommended) (the default) + - On host maintenance – Migrate VM instance (recommended) (the default) + + Screenshot of the Management subtab used during creation of a new VM instance template, part of deploying NGINX Plus as the Google load balancer. + +8. On the Disks subtab, verify that the checkbox labeled Delete boot disk when instance is deleted is checked. + + Instances created from this template are ephemeral instantiations of the gold image, so we want GCE to reclaim the disk when the instance is terminated. New instances are always based on the gold image, so there is no reason to have the instantiations persist on disk when the instance is deleted. + + Screenshot of the Disks subtab used during creation of a new VM instance template, part of deploying NGINX Plus as the Google Cloud load balancer. + +9. On the Networking subtab, verify the default settings of Ephemeral for External IP and Off for IP Forwarding. 
+ + Screenshot of the Networking subtab used during creation of a new VM instance template, part of deploying NGINX Plus as the Google load balancer. + +10. If you are using your own SSH public key instead of the default keys associated with your GCE identity, on the SSH Keys subtab paste the hexadecimal key string into the box that reads Enter entire key data. + + Screenshot of the SSH Keys subtab used during creation of a new VM instance, part of deploying NGINX Plus as the Google Cloud Platform load balancer. + +11. Click the  Create  button. + + +### Creating the Second Application Instance Template + +1. On the Instance templates summary page, click CREATE INSTANCE TEMPLATE. + +2. Repeat Steps 4 through 10 of Creating the First Application Instance Template to create a second application instance template, specifying the same values as for the first instance template, except as noted: + + - In Step 4: + - **Name** – nginx-plus-app-2-instance-template + - Boot disk – Click the nginx-plus-app-2-image radio button + - In Step 6, **Description** – NGINX Plus app-2 Instance Template + + +### Creating the Load-Balancing Instance Template + +1. On the Instance templates summary page, click CREATE INSTANCE TEMPLATE. + +2. Repeat Steps 4 through 10 of Creating the First Application Instance Template to create the load‑balancing instance template, specifying the same values as for the first instance template, except as noted: + + - In Step 4: + - **Name** – nginx-plus-lb-instance-template. + - Boot disk – Click the nginx-plus-lb-image radio button + + - In Step 6, **Description** – NGINX Plus Load‑Balancing Instance Template + + +## Task 5: Creating Image Health Checks + +Define the simple HTTP health check that GCE uses to verify that each NGINX Plus LB image is running correctly (and to re-create an LB instance that is not running correctly). + +1. Verify that the NGINX Plus All-Active-LB project is still selected in the Google Cloud Platform header bar. + +2. 
Navigate to the Compute Engine > Health checks tab. + +3. Click the  Create a health check  button. + +4. On the Create a health check page that opens, modify or verify the fields as indicated: + + - **Name** – nginx-plus-http-health-check + - **Description** – Basic HTTP health check to monitor NGINX Plus instances + - **Protocol** – HTTP (the default) + - **Port** – 80 (the default) + - Request path – /status-old.html + +5. If the Health criteria section is not already open, click More. + +6. Modify or verify the fields as indicated: + + - Check interval – 10 seconds + - **Timeout** – 10 seconds + - Healthy threshold – 2 consecutive successes (the default) + - Unhealthy threshold – 10 consecutive failures + +7. Click the  Create  button. + + Screenshot of the interface for creating a health check in Google Compute Engine (GCE), which Google network load balancer uses to monitor NGINX Plus as the Google cloud load balancer. + + +## Task 6: Creating Instance Groups + +Create three independent instance groups, one for each type of function-specific instance. + +1. Verify that the NGINX Plus All-Active-LB project is still selected in the Google Cloud Platform header bar. + +2. Navigate to the Compute Engine > Instance groups tab. + +3. Click the  Create instance group  button. + + +### Creating the First Application Instance Group + +1. On the Create a new instance group page that opens, modify or verify the fields as indicated. Ignore fields that are not mentioned: + + - **Name** – nginx-plus-app-1-instance-group + - **Description** – Instance group to host NGINX Plus app-1 instances + - **Location** – + - Click the Single-zone radio button (the default). + - **Zone** – The GCP zone you specified when you created source instances (Step 1 of [Creating the First Application Instance from a VM Image](#source-vm-app-1) or Step 5 of [Creating the First Application Instance from a Prebuilt Image](#source-prebuilt)). We're using us-west1-a. 
+ - Creation method – Use instance template radio button (the default) + - Instance template – nginx-plus-app-1-instance-template (select from the drop-down menu) + - **Autoscaling** – Off (the default) + - Number of instances – 2 + - Health check – nginx-plus-http-health-check (select from the drop-down menu) + - Initial delay – 300 seconds (the default) + +3. Click the  Create  button. + + Screenshot of the interface for creating a Google Compute Engine (GCE) instance group, used during deployment of NGINX Plus as the load balancer for Google Cloud. + + +### Creating the Second Application Instance Group + +1. On the Instance groups summary page, click CREATE INSTANCE GROUP. + +2. Repeat the steps in [Creating the First Application Instance Group](#groups-app-1) to create a second application instance group, specifying the same values as for the first instance template, except for these fields: + + - **Name** – nginx-plus-app-2-instance-group + - **Description** – Instance group to host NGINX Plus app-2 instances + - Instance template – nginx-plus-app-2-instance-template (select from the drop-down menu) + + +### Creating the Load-Balancing Instance Group + +1. On the Instance groups summary page, click CREATE INSTANCE GROUP. + +2. 
Repeat the steps in [Creating the First Application Instance Group](#groups-app-1) to create the load‑balancing instance group, specifying the same values as for the first instance template, except for these fields: + + - **Name** – nginx-plus-lb-instance-group + - **Description** – Instance group to host NGINX Plus load balancing instances + - Instance template – nginx-plus-lb-instance-template (select from the drop-down menu) + + +### Updating and Testing the NGINX Plus Configuration + +Update the NGINX Plus configuration on the two LB instances (nginx-plus-lb-instance-group-[a...z]) to list the internal IP addresses of the four application servers (two instances each of nginx-plus-app-1-instance-group-[a...z] and nginx-plus-app-2-instance-group-[a...z]). + +Repeat these instructions for both LB instances. + +**Note:** Some commands require `root` privilege. If appropriate for your environment, prefix commands with the `sudo` command. + +1. Connect to the LB instance over SSH using the method of your choice. GCE provides a built-in mechanism: + + - Navigate to the Compute Engine > VM instances tab. + - In the row for the instance in the table, click the triangle icon in the Connect column at the far right and select a method (for example, Open in browser window). + +3. Working in the SSH terminal, use your preferred text editor to modify gce-all-active-lb.conf, changing the `server` directives in the `upstream` block to reference the internal IP addresses of the two nginx-plus-app-1-instance-group-[a...z] instances and the two nginx-plus-app-2-instance-group-[a...z] instances. You can look up the addresses in the Internal IP column of the table on the Compute Engine > VM instances summary page. 
For example:
+
+    ```nginx
+    upstream upstream_app_pool {
+        zone upstream-apps 64k;
+
+        server 10.10.10.1;
+        server 10.10.10.2;
+        server 10.10.10.3;
+        server 10.10.10.4;
+
+        sticky cookie GCPPersist expires=300;
+    }
+    ```
+
+    Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [`sticky cookie`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone)
+
+4. Validate the NGINX Plus configuration and restart NGINX Plus:
+
+    ```shell
+    nginx -t
+    nginx -s reload
+    ```
+
+5. Verify that the four application instances are receiving traffic and responding, by accessing the NGINX Plus live activity monitoring dashboard on the load‑balancing instance (nginx-plus-lb-instance-group-[a...z]). You can look up the instance's external IP address on the Compute Engine > VM instances summary page, in the External IP column of the table.
+
+    https://_LB-external-IP-address_:8080/status.html
+
+6. Verify that NGINX Plus is load balancing traffic among the four application instance groups, by running this command on a separate client machine:
+
+    ```shell
+    while true; do curl -s <LB-external-IP-address> | grep Server: ;done
+    ```
+
+    If load balancing is working properly, the unique **Server** field from the index page for each application instance appears in turn.
+
+
+## Task 7: Configuring GCE Network Load Balancer
+
+Set up GCE network load balancer to distribute incoming client traffic to the NGINX Plus LB instances. The first step is to reserve the static IP address that GCE network load balancer advertises to clients.
+
+1. Verify that the NGINX Plus All-Active-LB project is still selected in the Google Cloud Platform header bar.
+
+2. Navigate to the Networking > External IP addresses tab.
+
+3. Click the  Reserve static address  button.
+
+4. 
On the Reserve a static address page that opens, modify or verify the fields as indicated:
+
+    - **Name** – nginx-plus-network-lb-static-ip
+    - **Description** – Static IP address for Network LB frontend to NGINX Plus LB instances
+    - **Type** – Click the Regional radio button (the default)
+    - **Region** – The GCP region containing the zone you specified when you created source instances (Step 1 of [Creating the First Application Instance from a VM Image](#source-vm-app-1) or Step 5 of [Creating the First Application Instance from a Prebuilt Image](#source-prebuilt)). We're using us-west1.
+    - Attached to – None (the default)
+
+5. Click the  Reserve  button.
+
+    Screenshot of the interface for reserving a static IP address for Google Compute Engine network load balancer.
+
+6. Navigate to the Networking > Load balancing tab.
+
+7. Click the  Create load balancer  button.
+
+8. On the Load balancing page that opens, click Start configuration in the TCP Load Balancing box.
+
+9. On the page that opens, click the From Internet to my VMs and No (TCP) radio buttons (the defaults).
+
+10. Click the  Continue  button. The New TCP load balancer page opens.
+
+11. In the **Name** field, type nginx-plus-network-lb-frontend.
+
+12. Click Backend configuration in the left column to open the Backend configuration interface in the right column. Fill in the fields as indicated:
+
+    - **Region** – The GCP region you specified in Step 4. We're using us-west1.
+    - **Backends** – With Select existing instance groups selected, select nginx-plus-lb-instance-group from the drop-down menu
+    - Backup pool – None (the default)
+    - Failover ratio – 10 (the default)
+    - Health check – nginx-plus-http-health-check
+    - Session affinity – Client IP
+
+    Screenshot of the interface for backend configuration of GCE network load balancer, used during deployment of NGINX Plus as the Google Cloud Platform load balancer.
+
+13. 
Click Frontend configuration in the left column to open the Frontend configuration interface in the right column.
+
+14. Create three Protocol-IP-Port tuples, each with:
+
+    - **Protocol** – TCP
+    - **IP** – The address you reserved in Step 5, selected from the drop-down menu (if there is more than one address, select the one labeled in parentheses with the name you specified in Step 4)
+    - **Port** – 80, 8080, and 443 in the three tuples respectively
+
+15. Click the  Create  button.
+
+    Screenshot of the interface for frontend configuration of GCE network load balancer, used during deployment of NGINX Plus as the Google Cloud load balancer.
+
+
+## Task 8: Testing the All-Active Load Balancing Deployment
+
+Verify that GCE network load balancer is properly routing traffic to both NGINX Plus LB instances.
+
+**Note:** Some commands require `root` privilege. If appropriate for your environment, prefix commands with the `sudo` command.
+
+Working on a separate client machine, run this command, using the static IP address you set in the previous section for GCE network load balancer:
+
+```shell
+while true; do curl -s <GCE-Network-LB-external-static-IP-address> | grep Server: ;done
+```
+
+Alternatively, you can use a web browser to access this URL:
+
+  **http://**_GCE-Network-LB-external-static-IP-address_
+
+If load balancing is working properly, the unique **Server** field from the index page for each application instance appears in turn.
+
+To verify that high availability is working:
+
+1. Connect to one of the instances in the nginx-plus-lb-instance-group over SSH and run this command to force it offline:
+
+    ```shell
+    iptables -A INPUT -p tcp --destination-port 80 -j DROP
+    ```
+
+2. Verify that with one LB instance offline, the other LB instance still forwards traffic to the application instances (there might be a delay before GCE network load balancer detects that the first instance is offline). 
Continue monitoring and verify that GCE network load balancer then re-creates the first LB instance and brings it online. + +3. When the LB instance is back online, run this command to return it to its working state: + + ```shell + iptables -F + ``` + +### Revision History + +- Version 3 (July 2018) – Updates for Google Cloud Platform Marketplace +- Version 2 (April 2018) – Standardized information about root privilege and links to directive documentation +- Version 1 (November 2016) – Initial version (NGINX Plus R11) diff --git a/content/nginx/deployment-guides/load-balance-third-party/_index.md b/content/nginx/deployment-guides/load-balance-third-party/_index.md new file mode 100644 index 000000000..7fc732f2c --- /dev/null +++ b/content/nginx/deployment-guides/load-balance-third-party/_index.md @@ -0,0 +1,9 @@ +--- +description: Deployment guides for configuring F5 NGINX Plus to load balance or interoperate + with third-party technologies. +menu: + docs: + parent: NGINX Plus +title: Load Balancing Third-Party Servers +weight: 100 +--- diff --git a/content/nginx/deployment-guides/load-balance-third-party/apache-tomcat.md b/content/nginx/deployment-guides/load-balance-third-party/apache-tomcat.md new file mode 100644 index 000000000..fe42fa5f8 --- /dev/null +++ b/content/nginx/deployment-guides/load-balance-third-party/apache-tomcat.md @@ -0,0 +1,977 @@ +--- +description: Load balance Apache Tomcat application servers with NGINX Open Source + or the advanced features in F5 NGINX Plus, following our step-by-step setup instructions. +docs: DOCS-451 +doctypes: +- task +title: Load Balancing Apache Tomcat Servers with NGINX Open Source and NGINX Plus +toc: true +weight: 100 +--- + +This deployment guide explains how to use NGINX Open Source and F5 NGINX Plus to load balance HTTP and HTTPS traffic across a pool of Apache TomcatTM application servers. The detailed instructions in this guide apply to both cloud‑based and on‑premises deployments of Tomcat. 
+ + + +## About NGINX Open Source and NGINX Plus + +[NGINX Open Source](https://nginx.org/en) is an open source web server and reverse proxy that has grown in popularity in recent years because of its scalability, outstanding performance, and small footprint. NGINX Open Source was first created to solve the C10K problem (serving 10,000 simultaneous connections on a single web server). NGINX Open Source's features and performance have made it a staple of high‑performance sites – it's [the #1 web server at the 100,000 busiest websites in the world](https://w3techs.com/technologies/cross/web_server/ranking). + +[NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) is the commercially supported version of NGINX Open Source. NGINX Plus is a complete application delivery platform, extending the power of NGINX Open Source with a host of enterprise‑ready capabilities that enhance a Tomcat deployment and are instrumental to building web applications at scale: + +- [Full‑featured HTTP, TCP, and UDP load balancing](https://www.nginx.com/products/nginx/load-balancing/) +- [Intelligent session persistence](https://www.nginx.com/products/nginx/load-balancing/#session-persistence) +- [High‑performance reverse proxy]({{< relref "../../admin-guide/web-server/reverse-proxy.md" >}}) +- [Caching and offload of dynamic and static content]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}) +- [Adaptive streaming to deliver audio and video to any device](https://www.nginx.com/products/nginx/streaming-media/) +- [Application-aware health checks](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/) and [high availability](https://docs.nginx.com/nginx/admin-guide/high-availability/) +- [Advanced activity monitoring available via a dashboard or API](https://www.nginx.com/products/nginx/live-activity-monitoring/) +- [Management and real‑time configuration changes with DevOps‑friendly 
tools](https://www.nginx.com/products/nginx/load-balancing/#load-balancing-api) + + +## About Apache Tomcat + +Apache Tomcat is an open source software implementation of the Java Servlet, JavaServer Pages, Java Expression Language, and Java WebSocket technologies. + +We tested the procedures in this guide against Apache Tomcat 8.0. + + +## Prerequisites and System Requirements + +- A Tomcat application server installed and configured on a physical or virtual system. +- A Linux system to host NGINX Open Source or NGINX Plus. To avoid potential conflicts with other applications, we recommend you install the software on a fresh physical or virtual system. For the list of operating systems supported by NGINX Plus, see [NGINX Plus Technical Specifications]({{< relref "../../technical-specs.md" >}}). +- NGINX Open Source 1.9.5 and later, and NGINX Plus R7 and later. + +The instructions assume you have basic Linux system administration skills, including the following. Full instructions are not provided for these tasks. + +- Configuring and deploying a Tomcat application +- Installing Linux software from vendor‑supplied packages +- Editing configuration files +- Copying files between a central administrative system and Linux servers +- Running basic commands to start and stop services +- Reading log files + + + +### About Sample Values and Copying of Text + +- `example.com` is used as a sample domain name (in key names and configuration blocks). Replace it with your organization's name. +- Many NGINX Open Source and NGINX Plus configuration blocks in this guide list two sample Tomcat application servers with IP addresses 10.100.100.11 and 10.100.100.12. Replace these addresses with the IP addresses of your Tomcat servers. Include a line in the configuration block for each server if you have more or fewer than two. +- For readability reasons, some commands appear on multiple lines. 
If you want to copy and paste them into a terminal window, we recommend that you first copy them into a text editor, where you can substitute the object names that are appropriate for your deployment and remove any extraneous formatting characters that your browser might insert. +- Some of the examples in this guide are partial and require additional directives or parameters to be complete. You can download complete configuration files for basic and enhanced load balancing from the NGINX website, as instructed in [Creating and Modifying Configuration Files](#config-files). For details about a specific directive or parameter, see the [NGINX reference documentation](https://nginx.org/en/docs/). +- We recommend that you do not copy text from the configuration snippets in this guide into your configuration files. For the recommended way to create configuration files, see [Creating and Modifying Configuration Files](#config-files). + + +## Configuring an SSL/TLS Certificate for Client Traffic + +If you plan to enable SSL/TLS encryption of traffic between NGINX Open Source or NGINX Plus and clients of your Tomcat application, you need to configure a server certificate for NGINX Open Source or NGINX Plus. + +- SSL/TLS support is enabled by default in all [NGINX Plus packages](https://cs.nginx.com/) and [NGINX Open Source binaries](https://nginx.org/en/linux_packages.html) provided by NGINX. +- If you are compiling NGINX Open Source from source, include the `--with-http_ssl_module` parameter to enable SSL/TLS support for HTTP traffic (the corresponding parameter for TCP is `--with-stream_ssl_module`, and for email is `--with-mail_ssl_module`, but this guide does not cover either of those protocol types). +- If using binaries from other providers, consult the provider documentation to determine if they support SSL/TLS. + +There are several ways to obtain a server certificate, including the following. 
For your convenience, step-by-step instructions are provided for the second and third options. + +- If you already have an SSL/TLS certificate for NGINX Open Source or NGINX Plus installed on another UNIX or Linux system (including systems running Apache HTTP Server), copy it to the **/etc/nginx/ssl** directory on the NGINX Open Source or NGINX Plus server. +- Generate a self‑signed certificate as described in [Generating a Self‑Signed Certificate](#certificate-self-signed) below. This is sufficient for testing scenarios, but clients of production deployments generally require a certificate signed by a certificate authority (CA). +- Request a new certificate from a CA or your organization's security group, as described in [Generating a Certificate Request](#certificate-request) below. + +For more details on SSL/TLS termination, see the [NGINX Plus Admin Guide]({{< relref "/nginx/admin-guide/_index.md" >}}). + + + +### Generating a Self-Signed Certificate + +Generate a public‑private key pair and a self‑signed server certificate in PEM format that is based on them. + +1. Log in as the root user on a machine that has the `openssl` software installed. + +2. Generate the key pair in PEM format (the default). To encrypt the private key, include the `-des3` parameter. (Other encryption algorithms are available, listed on the man page for the [genrsa](https://www.openssl.org/docs/manmaster/man1/openssl-genrsa.html) command.) You are prompted for the passphrase used as the basis for encryption. + + ```shell + root# openssl genrsa -des3 -out ~/private-key.pem 2048 + Generating RSA private key ... + Enter pass phrase for private-key.pem: + ``` + +3. Create a backup of the key file in a secure location. If you lose the key, the certificate becomes unusable. + + ```shell + root# cp ~/private-key.pem /private-key.pem.backup + ``` + +4. Generate the certificate. Include the `-new` and `-x509` parameters to make a new self‑signed certificate. 
Optionally include the `-days` parameter to change the key's validity lifetime from the default of 30 days (10950 days is about 30 years). Respond to the prompts with values appropriate for your testing deployment. + + ```none + root# openssl req -new -x509 -key ~/private-key.pem -out ~/self-cert.pem -days 10950 + ``` + +5. Copy or move the certificate file and associated key files to the **/etc/nginx/ssl** directory on the NGINX Open Source or NGINX Plus server. + + + +### Generating a Certificate Request + +1. Log in as the root user on a machine that has the `openssl` software installed. + +2. Create a private key to be packaged in the certificate. + + ```shell + root# openssl genrsa -out ~/example.com.key 2048 + ``` + +3. Create a backup of the key file in a secure location. If you lose the key, the certificate becomes unusable. + + ```shell + root# cp ~/example.com.key /example.com.key.backup + ``` + +4. Create a Certificate Signing Request (CSR) file. + + ```shell + root# openssl req -new -sha256 -key ~/example.com.key -out ~/example.com.csr + ``` + +5. Request a certificate from a CA or your internal security group, providing the CSR file (**example.com.csr**). As a reminder, never share private keys (**.key** files) directly with third parties. + + The certificate needs to be PEM format rather than in the Windows‑compatible PFX format. If you request the certificate from a CA website yourself, choose NGINX or Apache (if available) when asked to select the server platform for which to generate the certificate. + +6. Copy or move the certificate file and associated key files to the **/etc/nginx/ssl** directory on the NGINX Plus server. + + +## Creating and Modifying Configuration Files + +To reduce errors, this guide has you copy directives from files provided by NGINX into your configuration files, instead of using a text editor to type in the directives yourself. 
Then you go through the sections in this guide (starting with [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers)) to learn how to modify the directives as required for your deployment. + +As provided, there is one file for basic load balancing (with NGINX Open Source or NGINX Plus) and one file for enhanced load balancing (with NGINX Plus). If you are installing and configuring NGINX Open Source or NGINX Plus on a fresh Linux system and using it only to load balance Tomcat traffic, you can use the provided file as your main configuration file, which by convention is called **/etc/nginx/nginx.conf**. + +We recommend, however, that instead of a single configuration file you use the scheme that is set up automatically when you install an NGINX Plus package, especially if you already have an existing NGINX Open Source or NGINX Plus deployment or plan to expand your use of NGINX Open Source or NGINX Plus to other purposes in future. In the conventional scheme, the main configuration file is still called **/etc/nginx/nginx.conf**, but instead of including all directives in it, you create separate configuration files for different HTTP‑related functions and store the files in the **/etc/nginx/conf.d** directory. You then use the `include` directive in the `http` context of the main file to read in the contents of the function‑specific files. + +To download the complete configuration file for basic load balancing: + +```none +root# cd /etc/nginx/conf.d +root# curl https://www.nginx.com/resource/conf/tomcat-basic.conf > tomcat-basic.conf +``` + +To download the complete configuration file for enhanced load balancing: + +```none +root# cd /etc/nginx/conf.d +root# curl https://www.nginx.com/resource/conf/tomcat-enhanced.conf > tomcat-enhanced.conf +``` + +(You can also access the URL in a browser and download the file that way.) 
+ +To set up the conventional configuration scheme, add an `http` configuration block in the main **nginx.conf** file, if it does not already exist. (The standard placement is below any global directives.) Add this `include` directive with the appropriate filename: + +```nginx +http { + include conf.d/tomcat-(basic|enhanced).conf; +} +``` + +You can also use wildcard notation to reference all files that pertain to a certain function or traffic type in the appropriate context block. For example, if you name all HTTP configuration files _function_-http.conf, this is an appropriate `include` directive: + +```nginx +http { + include conf.d/*-http.conf; +} +``` + +For reference purposes, the text of the full configuration files is included in this document: + +- [Full Configuration for Basic Load Balancing](#full-configuration-basic) +- [Full Configuration for Enhanced Load Balancing](#full-configuration-enhanced) + +We recommend, however, that you do not copy text directly from this document. It does not necessarily use the same mechanisms for positioning text (such as line breaks and white space) as text editors do. In text copied into an editor, lines might run together and indenting of child statements in configuration blocks might be missing or inconsistent. The absence of formatting does not present a problem for NGINX Open Source or NGINX Plus, because (like many compilers) they ignore white space during parsing, relying solely on semicolons and curly braces as delimiters. The absence of white space does, however, make it more difficult for humans to interpret the configuration and modify it without making mistakes. + + +### About Reloading Updated Configuration + +We recommend that each time you complete a set of updates to the configuration, you run the `nginx -t` command to test the configuration file for syntactic validity. 
+ +```none +root# nginx -t +nginx: the configuration file /etc/nginx/nginx.conf syntax is ok +nginx: configuration file /etc/nginx/nginx.conf test is successful +``` + +To tell NGINX Open Source or NGINX Plus to start using the new configuration, run one of the following commands: + +```none +root# nginx -s reload +``` + +or + +```none +root# service nginx reload +``` + + +## Configuring Basic Load Balancing with NGINX Open Source or NGINX Plus + +This section explains how to set up NGINX Open Source or NGINX Plus as a load balancer in front of two Tomcat servers. The instructions in the first two sections are mandatory: + +- [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers) +- [Configuring Basic Load Balancing](#load-balancing-basic) + +The instructions in the remaining sections are optional, depending on the requirements of your application: + +- [Configuring Basic Session Persistence](#session-persistence-basic) +- [Configuring Proxy of WebSocket Traffic](#websocket) +- [Configuring Content Caching](#caching) +- [Configuring HTTP/2 Support](#http2) + +The complete configuration file appears in [Full Configuration for Basic Load Balancing](#full-configuration-basic). + +If you are using NGINX Plus, you can configure additional enhanced features after you complete the configuration of basic load balancing. See [Configuring Enhanced Load Balancing with NGINX Plus](#enhanced). + + +### Configuring Virtual Servers for HTTP and HTTPS Traffic + +These directives define virtual servers for HTTP and HTTPS traffic in separate `server` blocks in the top‑level `http` configuration block. All HTTP requests are redirected to the HTTPS server. + +1. Configure a `server` block that listens for requests for **"https://example.com"** received on port 443. 
+ + The `ssl_certificate` and `ssl_certificate_key` directives are required; substitute the names of the certificate and private key you chose in [Configuring an SSL/TLS Certificate for Client Traffic](#tls-certificate). + + The other directives are optional but recommended. + + ```nginx + # In the 'http' block + server { + listen 443 ssl; + server_name example.com; + + ssl_certificate /etc/nginx/ssl/example.com.crt; + ssl_certificate_key /etc/nginx/ssl/example.com.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + } + ``` + + Directive documentation: [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen), [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server), [server_name](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_name), [ssl_certificate and ssl_certificate_key](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate), [ssl_prefer_server_ciphers](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_prefer_server_ciphers), [ssl_session_cache](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache) + +2. Configure a `server` block that permanently redirects requests received on port 80 for **"http://example.com"** to the HTTPS server, which is defined in the previous step. + + If you're not using SSL/TLS for client connections, omit the `return` directive. When instructed in the remainder of this guide to add directives to the `server` block for HTTPS traffic, add them to this block instead. 
+ + ```nginx + # In the 'http' block + server { + listen 80; + server_name example.com; + + # Redirect all HTTP requests to HTTPS + location / { + return 301 https://$server_name$request_uri; + } + } + ``` + + Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [return](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#return) + +For more information about configuring SSL/TLS, see the [NGINX Plus Admin Guide]({{< relref "/nginx/admin-guide/security-controls/terminating-ssl-http.md" >}}) and the reference documentation for the HTTP [SSL/TLS](https://nginx.org/en/docs/http/ngx_http_ssl_module.html) module. + + +### Configuring Basic Load Balancing + +To configure load balancing, you first create a named _upstream group_, which lists the backend servers among which client requests are distributed. You then set up NGINX Open Source or NGINX Plus as a reverse proxy and load balancer by referring to the upstream group in one or more `proxy_pass` directives. + +1. Configure an upstream group called **tomcat** with two Tomcat application servers listening on port 8080, one on IP address 10.100.100.11 and the other on 10.100.100.12. + + ```nginx + # In the 'http' block + upstream tomcat { + server 10.100.100.11:8080; + server 10.100.100.12:8080; + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + +2. In the `server` block for HTTPS traffic that we created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers), include two `location` blocks: + + - The first one matches HTTPS requests in which the path starts with /tomcat-app/, and proxies them to the **tomcat** upstream group we created in the previous step. 
+ + - The second one funnels all traffic to the first `location` block, by doing a temporary redirect of all requests for **"http://example.com/"**. + + ```nginx + # In the 'server' block for HTTPS traffic + location /tomcat-app/ { + proxy_pass http://tomcat; + } + + location = / { + return 302 /tomcat-app/; + } + ``` + + Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [return](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#return) + + +Note that these blocks handle only standard HTTPS traffic. If you want to load balance WebSocket traffic, you need to add another `location` block as described in [Configuring Proxy of WebSocket Traffic](#websocket). + +By default, NGINX Open Source and NGINX Plus use the Round Robin algorithm for load balancing among servers. The load balancer runs through the list of servers in the upstream group in order, forwarding each new request to the next server. In our example, the first request goes to 10.100.100.11, the second to 10.100.100.12, the third to 10.100.100.11, and so on. For information about the other available load-balancing algorithms, see the NGINX Plus Admin Guide. + +In NGINX Plus, you can also set up dynamic reconfiguration of an upstream group when the set of backend servers changes, using DNS or an API; see [Enabling Dynamic Reconfiguration of Upstream Groups](#reconfiguration). + +For more information about proxying and load balancing, see [NGINX Reverse Proxy]({{< relref "../../admin-guide/web-server/reverse-proxy.md" >}}) and [HTTP Load Balancing]({{< relref "../../admin-guide/load-balancer/http-load-balancer.md" >}}) in the NGINX Plus Admin Guide, and the reference documentation for the HTTP [Proxy](https://nginx.org/en/docs/http/ngx_http_proxy_module.html) and [Upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html) modules. 
+ + +### Configuring Basic Session Persistence + +If your application requires basic session persistence (also known as _sticky sessions_), you can implement it in NGINX Open Source with the IP Hash load‑balancing algorithm. (NGINX Plus offers a more sophisticated form of session persistence, as described in [Configuring Advanced Session Persistence](#session-persistence-advanced).) + +With the IP Hash algorithm, for each request a hash based on the client's IP address is calculated and associated with one of the upstream servers. All requests with that hash are sent to that server, thus establishing session persistence. + +If the client has an IPv6 address, the hash is based on the entire address. If it has an IPv4 address, the hash is based on just the first three octets of the address. This is designed to optimize for ISP clients that are assigned IP addresses dynamically from a subnetwork (/24) range. However, it is not effective in these cases: + +- The majority of the traffic to your site is coming from one forward proxy or from clients on the same /24 network, because in that case IP Hash maps all clients to the same server. + +- A client's IP address can change during the session, for example when a mobile client switches from a WiFi network to a cellular one. + +To configure session persistence in NGINX, add the `ip_hash` directive to the `upstream` block created in [Configuring Basic Load Balancing](#load-balancing-basic): + +```nginx +# In the 'http' block +upstream tomcat { + ip_hash; + server 10.100.100.11:8080; + server 10.100.100.12:8080; +} +``` + +Directive documentation: [ip_hash](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ip_hash) + +You can also use the Hash load‑balancing method for session persistence, with the hash based on any combination of text and [NGINX variables](https://nginx.org/en/docs/varindex.html) you specify. For example, you can hash on full (four‑octet) client IP addresses with the following configuration. 
+ +```nginx +# In the 'http' block +upstream tomcat { + hash $remote_addr; + server 10.100.100.11:8080; + server 10.100.100.12:8080; +} +``` + +Directive documentation: [hash](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#hash) + + +### Configuring Proxy of WebSocket Traffic + +The WebSocket protocol (defined in [RFC 6455](https://tools.ietf.org/html/rfc6455)) enables simultaneous two‑way communication over a single TCP connection between clients and servers, where each side can send data independently from the other. To initiate the WebSocket connection, the client sends a handshake request to the server, upgrading the request from standard HTTP to WebSocket. The connection is established if the handshake request passes validation, and the server accepts the request. When a WebSocket connection is created, a browser client can send data to a server while simultaneously receiving data from that server. + +Tomcat 8 does not enable WebSocket by default, but instructions for enabling it are available in the [Tomcat documentation](https://tomcat.apache.org/tomcat-8.0-doc/web-socket-howto.html). If you want to use NGINX Open Source or NGINX Plus to proxy WebSocket traffic to your Tomcat application servers, add the directives discussed in this section. + + NGINX Open Source and NGINX Plus by default use HTTP/1.0 for upstream connections. 
To be proxied correctly, WebSocket connections require HTTP/1.1 along with some other configuration directives that set HTTP headers: + +```nginx +# In the 'http' block +map $http_upgrade $connection_upgrade { + default upgrade; + '' close; +} + +# In the 'server' block for HTTPS traffic +location /wstunnel/ { + proxy_pass http://tomcat; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; +} +``` + +Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [map](https://nginx.org/en/docs/http/ngx_http_map_module.html#map), [proxy_http_version](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) + +The first `proxy_set_header` directive is needed because the `Upgrade` request header is hop-by-hop; that is, the HTTP specification explicitly forbids proxies from forwarding it. This directive overrides the prohibition. + +The second `proxy_set_header` directive sets the `Connection` header to a value that depends on the test in the `map` block: if the request has an `Upgrade` header, the `Connection` header is set to `upgrade`; otherwise, it is set to `close`. + +For more information about proxying WebSocket traffic, see [WebSocket proxying](https://nginx.org/en/docs/http/websocket.html) and [NGINX as a WebSocket Proxy](https://www.nginx.com/blog/websocket-nginx/). + + + +### Configuring Content Caching + +Caching responses from your Tomcat app servers can both improve response time to clients and reduce load on the servers, because eligible responses are served immediately from the cache instead of being generated again on the server. 
There are a variety of useful directives that can be used to fine‑tune caching behavior; for a detailed discussion, see [A Guide to Caching with NGINX and NGINX Plus](https://www.nginx.com/blog/nginx-caching-guide/). + +To enable basic caching in NGINX Open Source or NGINX Plus, add the following configuration: + +1. Include the `proxy_cache_path` directive to create the local disk directory **/tmp/NGINX_cache/** for use as a cache. The `keys_zone` parameter allocates 10 megabytes (MB) of shared memory for a zone called **backcache**, which is used to store cache keys and metadata such as usage timers. A 1‑MB zone can store data for about 8,000 keys. + + ```nginx + # In the 'http' block + proxy_cache_path /tmp/NGINX_cache/ keys_zone=backcache:10m; + ``` + + Directive documentation: [proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) + +2. In the `location` block that matches HTTPS requests in which the path starts with /tomcat-app/, include the `proxy_cache` directive to reference the cache created in the previous step. + + ```nginx + # In the 'server' block for HTTPS traffic + location /tomcat-app/ { + proxy_pass http://tomcat; + proxy_cache backcache; + } + ``` + + Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_cache](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) + +By default, the cache key is similar to this string of [NGINX variables](https://nginx.org/en/docs/varindex.html): `$scheme$proxy_host$request_uri`. To change the list of variables, specify them with the `proxy_cache_key` directive. One effective use of this directive is to create a cache key for each user based on the `JSESSIONID` cookie. This is useful when the cache is private, for example containing shopping cart data or other user‑specific resources. 
Include the `JSESSIONID` cookie in the cache key with this directive:
+
+```nginx
+proxy_cache_key $proxy_host$request_uri$cookie_jsessionid;
+```
+
+ Directive documentation: [proxy_cache_key](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_key)
+
+For more information about caching, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}) and the reference documentation for the HTTP [Proxy](https://nginx.org/en/docs/http/ngx_http_proxy_module.html) module.
+
+
+### Configuring HTTP/2 Support
+
+HTTP/2 is fully supported in both NGINX Open Source 1.9.5 and later, and NGINX Plus R7 and later. As always, we recommend you run the latest version of software to take advantage of improvements and bug fixes.
+
+- If using NGINX Open Source, note that in version 1.9.5 and later the SPDY module is completely removed from the codebase and replaced with the [HTTP/2](https://nginx.org/en/docs/http/ngx_http_v2_module.html) module. After upgrading to version 1.9.5 or later, you can no longer configure NGINX Open Source to use SPDY. If you want to keep using SPDY, you need to compile NGINX Open Source from the sources in the [NGINX 1.8.x branch](https://nginx.org/en/download.html).
+
+- In NGINX Plus R8 and later, NGINX Plus supports HTTP/2 by default. (Support for SPDY is deprecated as of that release). Specifically:
+
+  In NGINX Plus R11 and later, the nginx-plus package continues to support HTTP/2 by default, but the nginx-plus-extras package available in previous releases is deprecated by [dynamic modules](https://www.nginx.com/products/nginx/dynamic-modules/).
+
+  For NGINX Plus R8 through R10, the nginx-plus and nginx-plus-extras packages support HTTP/2 by default.
+
+  If using NGINX Plus R7, you must install the nginx-plus-http2 package instead of the nginx-plus or nginx-plus-extras package. 
+ +To enable HTTP/2 support, add the `http2` directive in the `server` block for HTTPS traffic that we created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers), so that it looks like this: + +```nginx +# In the 'server' block for HTTPS traffic +listen 443 ssl; +http2 on; +``` + +Directive documentation: [http2](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2) + +To verify that HTTP/2 translation is working, you can use the "HTTP/2 and SPDY indicator" plug‑in available for [Google Chrome](https://chrome.google.com/webstore/detail/http2-and-spdy-indicator/mpbpobfflnpcgagjijhmgnchggcjblin?hl=en) and [Firefox](https://addons.mozilla.org/en-US/firefox/addon/http2-indicator/). + + +### Full Configuration for Basic Load Balancing + +The full configuration for basic load balancing appears here for your convenience. It goes in the `http` context. The complete file is available for [download](https://www.nginx.com/resource/conf/tomcat-basic.conf) from the NGINX website. + +We recommend that you do not copy text directly from this document, but instead use the method described in [Creating and Modifying Configuration Files](#config-files) to include these directives in your configuration – add an `include` directive to the `http` context of the main **nginx.conf** file to read in the contents of **/etc/nginx/conf.d/tomcat-basic.conf**. 
+
+```nginx
+proxy_cache_path /tmp/NGINX_cache/ keys_zone=backcache:10m;
+
+map $http_upgrade $connection_upgrade {
+    default upgrade;
+    '' close;
+}
+
+upstream tomcat {
+    # Use IP Hash for session persistence
+    ip_hash;
+    # List of Tomcat application servers
+    server 10.100.100.11:8080;
+    server 10.100.100.12:8080;
+}
+
+server {
+    listen 80;
+    server_name example.com;
+    # Redirect all HTTP requests to HTTPS
+    location / {
+        return 301 https://$server_name$request_uri;
+    }
+}
+
+server {
+    listen 443 ssl;
+    http2 on;
+
+    server_name example.com;
+    ssl_certificate /etc/nginx/ssl/example.com.crt;
+    ssl_certificate_key /etc/nginx/ssl/example.com.key;
+    ssl_session_cache shared:SSL:1m;
+    ssl_prefer_server_ciphers on;
+
+    # Load balance requests for '/tomcat-app/' across Tomcat application
+    # servers
+    location /tomcat-app/ {
+        proxy_pass http://tomcat;
+        proxy_cache backcache;
+    }
+
+    # Return a temporary redirect to '/tomcat-app/' when user requests '/'
+    location = / {
+        return 302 /tomcat-app/;
+    }
+
+    # WebSocket configuration
+    location /wstunnel/ {
+        proxy_pass http://tomcat;
+        proxy_http_version 1.1;
+        proxy_set_header Upgrade $http_upgrade;
+        proxy_set_header Connection $connection_upgrade;
+    }
+}
+
+```
+
+
+## Configuring Enhanced Load Balancing with NGINX Plus
+
+This section explains how to configure enhanced load balancing with some of the extended features in NGINX Plus.
+
+**Note:** Before setting up the enhanced features described in this section, you must complete the instructions for basic load balancing in these two sections:
+
+- [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers)
+- [Configuring Basic Load Balancing](#load-balancing-basic)
+
+Except as noted, all optional basic features (described in the other subsections of [Configuring Basic Load Balancing with NGINX Open Source or NGINX Plus](#basic)) can be combined with the enhanced features described here. 
+
+The features described in the following sections are all optional.
+
+- [Configuring Advanced Session Persistence](#session-persistence-advanced)
+- [Configuring Application Health Checks](#health-checks)
+- [Enabling Live Activity Monitoring](#live-activity-monitoring)
+- [Enabling Dynamic Reconfiguration of Upstream Groups](#reconfiguration)
+
+The complete configuration file appears in [Full Configuration for Enhanced Load Balancing](#full-configuration-enhanced).
+
+
+### Configuring Advanced Session Persistence
+
+NGINX Plus provides more sophisticated session persistence methods than NGINX Open Source, implemented in three variants of the `sticky` directive. In the following example, we add the `sticky route` directive to the upstream group we created in [Configuring Basic Load Balancing](#load-balancing-basic), to base session persistence on the `jvmRoute` attribute set by the Tomcat application.
+
+#### Configuring Sticky Route-Based Session Persistence
+
+1. In the NGINX Plus configuration, remove or comment out the `ip_hash` directive, leaving only the `server` directives:
+
+   ```nginx
+   # In the 'http' block
+   upstream tomcat {
+       #ip_hash;
+       server 10.100.100.11:8080;
+       server 10.100.100.12:8080;
+   }
+   ```
+
+   Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream)
+
+2. Add the following lines to the configuration files for your backend Tomcat servers to append an identifier based on the `jvmRoute` attribute (here, set to either `a` or `b`) to the end of the `JSESSIONID` cookie value:
+
+   ```none
+   # On host 10.100.100.11
+   <Engine name="Catalina" defaultHost="www.example.com" jvmRoute="a">
+
+   # On host 10.100.100.12
+   <Engine name="Catalina" defaultHost="www.example.com" jvmRoute="b">
+   ```
+
+3. Configure NGINX Plus to select the upstream server by inspecting the `JSESSIONID` cookie and URL in each request and extracting the `jvmRoute` value. 
+
+   ```nginx
+   # In the 'http' block
+   map $cookie_jsessionid $route_cookie {
+       ~.+\.(?P<route>\w+)$ $route;
+   }
+
+   map $request_uri $route_uri {
+       ~jsessionid=.+\.(?P<route>\w+)$ $route;
+   }
+
+   upstream tomcat {
+       server 10.100.100.11:8080 route=a;
+       server 10.100.100.12:8080 route=b;
+       sticky route $route_cookie $route_uri;
+   }
+   ```
+
+   Directive documentation: [map](https://nginx.org/en/docs/http/ngx_http_map_module.html#map), [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [`sticky route`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream)
+
+   - The first `map` directive extracts the final element (following the period) of the `JSESSIONID` cookie, recording it in the `$route_cookie` variable.
+   - The second `map` directive extracts the final element (following the period) from the trailing `jsessionid=` element of the request URL, recording it in the `$route_uri` variable.
+   - The `sticky route` directive tells NGINX Plus to use the value of the first nonempty variable it finds in the list of parameters, which here is the two variables set by the `map` directives. In other words, it uses the final element of the `JSESSIONID` cookie if it exists, and the final element of the `jsessionid=` URL element otherwise.
+
+   The `route` parameters to the `server` directives mean that the request is sent to 10.100.100.11 if the value is `a` and to 10.100.100.12 if the value is `b`.
+
+#### Configuring Sticky Learn-Based Session Persistence
+
+The `sticky learn` directive is another option for session persistence; in this case the session identifier is the `JSESSIONID` cookie created by your Tomcat application.
+
+1. Remove or comment out the `ip_hash` directive in the `upstream` block as in Step 1 above.
+
+2. 
Include the `sticky learn` directive in the `upstream` block: + + ```nginx + # In the 'http' block + upstream tomcat { + server 10.100.100.11:8080; + server 10.100.100.12:8080; + sticky learn create=$upstream_cookie_JSESSIONID + lookup=$cookie_JSESSIONID + zone=client_sessions:1m; + } + ``` + + Directive documentation: [map](https://nginx.org/en/docs/http/ngx_http_map_module.html#map), [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [`sticky learn`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + + - The `create` and `lookup` parameters specify how new sessions are created and existing sessions are searched for, respectively. For new sessions, NGINX Plus sets the session identifier to the value of the `$upstream_cookie_JSESSIONID` variable, which captures the `JSESSIONID` cookie sent by the Tomcat application server. When checking for existing sessions, it uses the `JSESSIONID` cookie sent by the client (the `$cookie_JSESSIONID` variable) as the session identifier. + + Both parameters can be specified more than once (each time with a different variable), in which case NGINX Plus uses the first nonempty variable for each one. + + - The `zone` argument creates a shared memory zone for storing information about sessions. The amount of memory allocated – here, 1 MB – determines how many sessions can be stored at a time (the number varies by platform). The name assigned to the zone – here, `client_sessions` – must be unique for each `sticky` directive. + +For more information about session persistence, see the NGINX Plus Admin Guide. + + +### Configuring Application Health Checks + +Health checks are out-of-band HTTP requests sent to a server at fixed intervals. They are used to determine whether a server is responsive and functioning correctly, without requiring an actual request from a client. 
+
+Because the `health_check` directive is placed in the `location` block, we can enable different health checks for each application.
+
+1. In the `location` block that matches HTTPS requests in which the path starts with /tomcat-app/ (created in [Configuring Basic Load Balancing](#load-balancing-basic)), add the `health_check` directive.
+
+   Here we configure NGINX Plus to send an out-of-band request for the top‑level URI **/** (slash) to each of the servers in the **tomcat** upstream group every 2 seconds, which is more aggressive than the default 5‑second interval. If a server does not respond correctly, it is marked down and NGINX Plus stops sending requests to it until it passes five subsequent health checks in a row. We include the `match` parameter to define a nondefault set of health‑check tests.
+
+   ```nginx
+   # In the 'server' block for HTTPS traffic
+   location /tomcat-app/ {
+       proxy_pass http://tomcat;
+       proxy_cache backcache;
+       health_check interval=2s fails=1 passes=5 uri=/ match=tomcat_check;
+   }
+   ```
+
+   Directive documentation: [health_check](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check), [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_cache](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass)
+
+2. In the `http` context, include a `match` directive to define the tests that a server must pass to be considered functional. In this example, it must return status code `200`, the `Content-Type` response header must be `text/html`, and the response body must match the indicated regular expression.
+
+   ```nginx
+   # In the 'http' block
+   match tomcat_check {
+       status 200;
+       header Content-Type = text/html;
+       body ~ "Apache Tomcat/8";
+   }
+   ```
+
+   Directive documentation: [match](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#match)
+
+3. 
In the **tomcat** upstream group, include the `zone` directive to define a shared memory zone that stores the group's configuration and run‑time state, which are shared among worker processes. + + ```nginx + # In the 'http' block + upstream tomcat { + zone tomcat 64k; + + server 10.100.100.11:8080; + server 10.100.100.12:8080; + # ... + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +NGINX Plus also has a slow‑start feature that is a useful auxiliary to health checks. When a failed server recovers, or a new server is added to the upstream group, NGINX Plus slowly ramps up the traffic to it over a defined period of time. This gives the server time to "warm up" without being overwhelmed by more connections than it can handle as it starts up. For more information, see the NGINX Plus Admin Guide. + +For example, to set a slow‑start period of 30 seconds for your Tomcat application servers, include the `slow_start` parameter to their `server` directives: + +```nginx +# In the 'upstream' block +#... +server 10.100.100.11:8080 slow_start=30s; +server 10.100.100.12:8080 slow_start=30s; +``` + +For information about customizing health checks, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/load-balancer/http-health-check.md" >}}). + + +### Enabling Live Activity Monitoring + +NGINX Plus includes a live activity monitoring interface that provides key load and performance metrics in real time, including TCP metrics in NGINX Plus R6 and later. Statistics are reported through a RESTful JSON interface, making it very easy to feed the data to a custom or third‑party monitoring tool. There is also a built‑in dashboard. Follow these instructions to deploy it. 
+ +Dashboard tab in NGINX Plus live activity monitoring dashboard + +For more information about live activity monitoring, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/monitoring/live-activity-monitoring.md" >}}). + +The quickest way to configure the module and the built‑in dashboard is to download the sample configuration file from the NGINX website, and modify it as necessary. For more complete instructions, see [Live Activity Monitoring of NGINX Plus in 3 Simple Steps](https://www.nginx.com/blog/live-activity-monitoring-nginx-plus-3-simple-steps/). + +1. Download the **status.conf** file to the NGINX Plus server: + + ```shell + # cd /etc/nginx/conf.d + # curl https://www.nginx.com/resource/conf/status.conf > status.conf + ``` + +2. Read in **status.conf** at the top‑level `http` configuration block in the main **nginx.conf** file: + + ```nginx + # In the 'http' block + include conf.d/status.conf; + ``` + + Directive documentation: [include](https://nginx.org/en/docs/ngx_core_module.html#include) + + If you are using the conventional configuration scheme and your existing `include` directives use the wildcard notation discussed in [Creating and Modifying Configuration Files](#config-files), you can either add a separate `include` directive for **status.conf** as shown above, or change the name of **status.conf** so it is captured by the wildcard in an existing `include` directive in the `http` block. For example, changing it to status-http.conf means it is captured by the `include` directive for `*-http.conf`. + +3. Comments in **status.conf** explain which directives you must customize for your deployment. In particular, the default settings in the sample configuration file allow anyone on any network to access the dashboard. We strongly recommend that you restrict access to the dashboard with one or more of the following methods: + + - **IP address‑based access control lists (ACLs)**. 
In the sample configuration file, uncomment the `allow` and `deny` directives, and substitute the address of your administrative network for 10.0.0.0/8. Only users on the specified network can access the status page. + + ```nginx + allow 10.0.0.0/8; + deny all; + ``` + + Directive documentation: [allow](https://nginx.org/en/docs/http/ngx_http_access_module.html#allow), [deny](https://nginx.org/en/docs/http/ngx_http_access_module.html#deny) + + - **HTTP Basic authentication** as defined in [RFC 7617](https://tools.ietf.org/html/rfc7617). In the sample configuration file, uncomment the `auth_basic` and `auth_basic_user_file` directives and add user entries to the **/etc/nginx/users** file (for example, by using an **htpasswd** generator). If you have an Apache installation, another option is to reuse an existing **htpasswd** file. + + ```nginx + auth_basic on; + auth_basic_user_file /etc/nginx/users; + ``` + + Directive documentation: [auth_basic](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic), [auth_basic_user_file](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic_user_file) + + - **Client certificates**, which are part of a complete configuration of SSL/TLS. For more information, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/security-controls/terminating-ssl-http.md" >}}) and the reference documentation for the HTTP [SSL/TLS](https://nginx.org/en/docs/http/ngx_http_ssl_module.html) module. + + - **Firewall**. Configure your firewall to disallow outside access to the port for the dashboard (8080 in the sample configuration file). + +4. In each upstream group that you want to monitor, include the `zone` directive to define a shared memory zone that stores the group's configuration and run‑time state, which are shared among worker processes. 
+ + For example, to monitor your Tomcat application servers, add the `zone` directive to the **tomcat** upstream group (if you followed the instructions in [Configuring Application Health Checks](#health-checks), you already made this change). + + ```nginx + # In the 'http' block + upstream tomcat { + zone tomcat 64k; + + server 10.100.100.11:8080; + server 10.100.100.12:8080; + #... + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +5. In the `server` block for HTTPS traffic (created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers)), add the `status_zone` directive: + + ```nginx + # In the 'server' block for HTTPS traffic + status_zone tomcat; + ``` + + Directive documentation: [status_zone](https://nginx.org/en/docs/http/ngx_http_status_module.html#status_zone) + +When you reload the NGINX Plus configuration file, for example by running the `nginx -s reload` command, the NGINX Plus dashboard is available immediately at **http://_nginx-plus-server-address_:8080**. + + +### Enabling Dynamic Reconfiguration of Upstream Groups + +With NGINX Plus, you can reconfigure load‑balanced server groups (both HTTP and TCP/UDP) dynamically using either the Domain Name System (DNS) or the NGINX Plus API introduced in NGINX Plus R13. See the NGINX Plus Admin Guide for a more detailed discussion of the DNS and [API]({{< relref "../../admin-guide/load-balancer/dynamic-configuration-api.md" >}}) methods. + +#### Configuring the API Method + +To enable dynamic reconfiguration of your upstream group of Tomcat app servers using the NGINX Plus API, you need to grant secured access to it. You can use the API to add or remove servers, dynamically alter their weights, and set their status as `primary`, `backup`, or `down`. + +1. 
Include the `zone` directive in the **tomcat** upstream group to create a shared memory zone for storing the group's configuration and run‑time state, which makes the information available to all worker processes. (If you configured [application health checks](#health-checks) or [live activity monitoring](#live-activity-monitoring), you already made this change.) + + ```nginx + # In the 'http' block + upstream tomcat { + zone tomcat 64k; + server 192.168.33.11:8080; + server 192.168.33.12:8080; + # ... + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +2. In the `server` block for HTTPS traffic (created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers)), add a new `location` block for the NGINX Plus API, which enables dynamic reconfiguration among other features. It contains the `api` directive (**api** is also the conventional name for the location, as used here). + + (If you configured [live activity monitoring](#live-activity-monitoring) by downloading the **status.conf** file, it already includes this block.) + + We strongly recommend that you restrict access to the location so that only authorized administrators can access the NGINX Plus API. The `allow` and `deny` directives in the following example permit access only from the localhost address (127.0.0.1). + + ```nginx + # In the 'server' block for HTTPS traffic + location /api { + api write=on; + allow 127.0.0.1; + deny all; + } + ``` + + Directive documentation: [allow and deny](https://nginx.org/en/docs/http/ngx_http_access_module.html), [api](https://nginx.org/en/docs/http/ngx_http_api_module.html#api) + +#### Configuring the DNS Method + +In the `http` block, add the `resolver` directive pointing to your DNS server. 
In the **tomcat** `upstream` block add the `resolve` parameter to the `server` directive, which instructs NGINX Plus to periodically re‑resolve the domain name (here, **example.com**) with DNS. + +Also include the `zone` directive in the `upstream` block to create a shared memory zone for storing the upstream group's configuration and run‑time state, which makes the information available to all worker processes. (If you configured [application health checks](#health-checks) or [live activity monitoring](#live-activity-monitoring), you already made this change.) + + +```nginx +# In the 'http' block +resolver <IP-address-of-DNS-server>; + +upstream tomcat { + zone tomcat 64k; + server example.com resolve; +} +``` + +Directive and parameter documentation: [resolve](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#resolve), [resolver](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +[NGINX Plus Release 9](https://www.nginx.com/blog/nginx-plus-r9-released/#dns-srv) and later can also use the additional information in DNS `SRV` records, such as the port number. Add the `service` parameter to the `server` directive, along with the `resolve` parameter: + +```nginx +# In the 'http' block +resolver <IP-address-of-DNS-server>; + +upstream tomcat { + zone tomcat 64k; + server example.com service=http resolve; +} +``` + +Parameter documentation: [service](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#service) + + +### Full Configuration for Enhanced Load Balancing + +The full configuration for enhanced load balancing appears here for your convenience. It goes in the `http` context. The complete file is available for [download](https://www.nginx.com/resource/conf/tomcat-enhanced.conf) from the NGINX website. 
+ +We recommend that you do not copy text directly from this document, but instead use the method described in [Creating and Modifying Configuration Files](#config-files) to include these directives in your configuration – namely, add an `include` directive to the `http` context of the main **nginx.conf** file to read in the contents of **/etc/nginx/conf.d/tomcat-enhanced.conf**. + +```nginx +proxy_cache_path /tmp/NGINX_cache/ keys_zone=backcache:10m; + +# WebSocket configuration +map $http_upgrade $connection_upgrade { + default upgrade; + '' close; +} + +# Extract the data after the final period (.) in the +# JSESSIONID cookie and store it in the $route_cookie variable. +map $cookie_jsessionid $route_cookie { + ~.+\.(?P<route>\w+)$ $route; +} + +# Search the URL for a trailing jsessionid parameter, extract the +# data after the final period (.), and store it in +# the $route_uri variable. +map $request_uri $route_uri { + ~jsessionid=.+\.(?P<route>\w+)$ $route; +} + +# Application health checks +match tomcat_check { + status 200; + header Content-Type = text/html; + body ~ "Apache Tomcat/8"; +} + +upstream tomcat { + # Shared memory zone for application health checks, live activity + # monitoring, and dynamic reconfiguration + zone tomcat 64k; + + # List of Tomcat application servers + server 10.100.100.11:8080 slow_start=30s; + server 10.100.100.12:8080 slow_start=30s; + + # Session persistence based on the jvmRoute value in + # the JSESSIONID cookie + sticky route $route_cookie $route_uri; + + # Uncomment the following directive (and comment the preceding + # 'sticky route' and JSESSIONID 'map' directives) for session + # persistence based on the JSESSIONID + #sticky learn create=$upstream_cookie_JSESSIONID + # lookup=$cookie_JSESSIONID + # zone=client_sessions:1m; +} + +server { + listen 80; + server_name example.com; + # Redirect all HTTP requests to HTTPS + location / { + return 301 https://$server_name$request_uri; + } +} + +server { + listen 443 ssl; + http2 on; + + 
server_name example.com; + + # Required for live activity monitoring of HTTPS traffic + status_zone tomcat; + + ssl_certificate /etc/nginx/ssl/example.com.crt; + ssl_certificate_key /etc/nginx/ssl/example.com.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + + # Load balance requests for '/tomcat-app/' across Tomcat application + # servers + location /tomcat-app/ { + proxy_pass http://tomcat; + proxy_cache backcache; + + # Active health checks + health_check interval=2s fails=1 passes=5 uri=/ match=tomcat_check; + } + + # Return a 302 redirect to '/tomcat-app/' when user requests '/' + location = / { + return 302 /tomcat-app/; + } + + # WebSocket configuration + location /wstunnel/ { + proxy_pass http://tomcat; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + } + + # Secured access to the NGINX Plus API + location /api { + api write=on; + allow 127.0.0.1; # Permit access from localhost + deny all; # Deny access from everywhere else + } +} + +``` + + +## Resources + +- [NGINX Plus Overview](https://www.nginx.com/products/nginx) +- [NGINX Plus Admin Guide]({{< relref "/nginx/admin-guide/_index.md" >}}) +- [NGINX Wiki](https://www.nginx.com/resources/wiki/) + +### Revision History + +- Version 6 (May 2024) – Update about HTTP/2 support (the [http2](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2) directive) +- Version 5 (October 2019) – Fix syntax of comment in config snippet (add missing `#`) +- Version 4 (February 2018) – Update for NGINX Plus API (NGINX Plus R14) +- Version 3 (April 2017) – Update about HTTP/2 support and dynamic modules (NGINX Plus R11, NGINX Open Source 1.11.5) +- Version 2 (January 2016) – Update about HTTP/2 support (NGINX Plus R8, NGINX Open Source 1.9.9) +- Version 1 (January 2016) – Initial version (NGINX Plus R7, NGINX Open Source 1.9.5) + diff --git a/content/nginx/deployment-guides/load-balance-third-party/microsoft-exchange.md 
b/content/nginx/deployment-guides/load-balance-third-party/microsoft-exchange.md new file mode 100644 index 000000000..48b1f81a1 --- /dev/null +++ b/content/nginx/deployment-guides/load-balance-third-party/microsoft-exchange.md @@ -0,0 +1,1354 @@ +--- +description: Load balance Microsoft Exchange servers with the advanced features in + F5 NGINX Plus, following our step-by-step setup instructions. +docs: DOCS-452 +doctypes: +- task +title: Load Balancing Microsoft Exchange Servers with NGINX Plus +toc: true +weight: 100 +--- + +This deployment guide explains how to use F5 NGINX Plus to load balance traffic across a pool of Microsoft ExchangeTM servers. You can deploy Exchange and NGINX Plus on premises, in a private cloud, or in public clouds including Amazon Web Services (AWS), the Google Cloud Platform, and Microsoft Azure. The guide covers the different installation types, and provides complete instructions for customizing both NGINX Plus and Exchange as required. + + +## About NGINX Plus + +[NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) is the commercially supported version of the [NGINX Open Source](https://nginx.org/en) software. 
NGINX Plus is a complete application delivery platform, extending the power of NGINX Open Source with a host of enterprise‑ready capabilities that enhance a Microsoft Exchange server deployment and are instrumental to building web applications at scale: + +- [Full‑featured HTTP, TCP, and UDP load balancing](https://www.nginx.com/products/nginx/load-balancing/) +- [Intelligent session persistence](https://www.nginx.com/products/nginx/load-balancing/#session-persistence) +- [High‑performance reverse proxy]({{< relref "../../admin-guide/web-server/reverse-proxy.md" >}}) +- [Caching and offload of dynamic and static content]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}) +- [Adaptive streaming to deliver audio and video to any device](https://www.nginx.com/products/nginx/streaming-media/) +- [Application-aware health checks](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/) and [high availability](https://docs.nginx.com/nginx/admin-guide/high-availability/) +- [Advanced activity monitoring available via a dashboard or API](https://www.nginx.com/products/nginx/live-activity-monitoring/) +- [Management and real‑time configuration changes with DevOps‑friendly tools](https://www.nginx.com/products/nginx/load-balancing/#load-balancing-api) + +[NGINX Plus Release 6 (R6)](https://www.nginx.com/blog/nginx-plus-r6-released/#tcp-load-balancing) introduced full‑featured load balancing of TCP traffic, with support extended to UDP in [NGINX Plus R9](https://www.nginx.com/blog/nginx-plus-r9-released/#udp-lb) and continuing enhancements for both protocols included in subsequent releases (for details, see [NGINX Plus Releases]({{< relref "/nginx/releases.md" >}})). Because NGINX Plus load balances TCP, HTTP, and HTTPS traffic, it's ideal for Microsoft Exchange deployments, which use all three protocols. 
+ + +## Using This Guide + +After reviewing [Prerequisites and System Requirements](#prereqs), perform the instructions in these sections: + +- [Configuring DNS, Exchange, and Firewalls](#dns-exchange-firewall) +- [Configuring an SSL/TLS Certificate for Client Traffic](#tls-certificate) +- [Configuring NGINX Plus](#config) +- [Completing the Configuration of Load Balancing](#config-completing) + +In the final section, you choose between basic Exchange load balancing and an enhanced configuration with greater fault tolerance, application health checks, and live activity monitoring. + +### About Sample Values and Copying of Text + +- `company.com` is used as a sample organization name (mostly in key names and DNS entries). Replace it with your organization's name. +- Many NGINX Plus configuration blocks in this guide list two sample client access servers (CASs) with IP addresses 10.0.0.237 and 10.0.0.238. Replace these addresses with the IP addresses of your CASs. Include a line in the configuration block for each CAS if you have more or fewer than two. In contrast, port numbers are obligatory values except where noted. +- For readability reasons, some commands appear on multiple lines. If you want to copy and paste them into a terminal window, we recommend that you first copy them into a text editor, where you can substitute the object names that are appropriate for your deployment and remove any extraneous formatting characters that your browser might insert. +- We recommend that you do not copy text from the configuration snippets in this guide into your configuration files. For the recommended way to create configuration files, see [Creating and Modifying Configuration Files](#config-files). + + +## Prerequisites and System Requirements + +- Microsoft Exchange 2013 or later, installed and configured on a system running Windows Server 2012 or later. The load balancing functionality is not supported for earlier versions of Microsoft Exchange. 
+ + Exchange CASs can be configured for [HTTP Basic authentication](https://tools.ietf.org/html/rfc7617), as specified in this guide. + +- A Linux system to host NGINX Plus (in on‑premises and private‑cloud deployments). To avoid potential conflicts with other applications, we recommend that you install NGINX Plus on a fresh system. For the list of Linux distributions supported by NGINX Plus, see [NGINX Plus Technical Specifications]({{< relref "../../technical-specs.md" >}}). + +- NGINX Plus R7 or later. TCP load balancing is available in NGINX Plus R6 and later. Support for the Microsoft NT LAN Manager (NTLM) is available in NGINX Plus R7 and later. + +The instructions assume you have basic Linux system administration skills, including the following. Full instructions are not provided for these tasks. + +- Installing Linux software from vendor‑supplied packages +- Editing configuration files +- Copying files between a central administrative system and Linux servers +- Running basic commands to start and stop services +- Reading log files + +Similarly, the instructions assume you have basic Windows system administration skills, including the following. + +- Logging in to a system through Microsoft Remote Desktop +- Running PowerShell commands +- Restarting Internet Information Services (IIS) services + + +## Configuring DNS, Exchange, and Firewalls + +To prepare for the configuration of NGINX Plus as your Exchange load balancer, first perform the steps in these sections: + +- [Configuring DNS](#dns) +- [Configuring Exchange](#exchange) +- [Configuring Firewalls](#firewall) + + +### Configuring DNS + +Exchange requires the following Domain Name System (DNS) records for normal operation. Create or modify them as necessary. + +- An `MX` record for mail delivery. + + ```none + company.com. 300 MX 10 mail.company.com + ``` + +- An `A` record for the main email server. Replace `X.X.X.X` with the public IP address of your NGINX Plus server. 
+ + ```none + mail.company.com. 60 A X.X.X.X + ``` + +- A `TXT` record for Sender Policy Framework (SPF). Replace `X.X.X.X` with the public IP address of your NGINX Plus server. For more information about SPF records, see the [Microsoft documentation](https://docs.microsoft.com/en-us/microsoft-365/security/office-365-security/set-up-spf-in-office-365-to-help-prevent-spoofing). + + ```none + company.com. 300 TXT "v=spf1 mx a ip4:X.X.X.X/32 -all" + ``` + +- An `SRV` record for the Autodiscover service. + + ```none + _autodiscover._tcp.company.com. 60 SRV 1 10 443 mail.company.com + ``` + + +### Configuring Exchange + +Use Exchange Management Shell (PowerShell) to configure Exchange on each CAS. When running the `Set` command, you must always specify the CAS name and directory (together referred to as the _identity_) for the object being configured. You can either include the `-Identity` flag on the `Set` command line, or type the identity at the prompt that appears if you don't include the `-Identity` flag (as in the commands in this section). + +To obtain an identity if you don't know it, run the `Get` command that corresponds to the `Set` command you need to run. Include the `fl` (formatted list) keyword on the `Get` command line to view the complete output from the command. For example, to obtain identity information for the `Set-OutlookAnywhere` command, run this command: + +```none +C:\> Get-OutlookAnywhere | fl +``` + +Identities are case insensitive, and generally include spaces, parentheses, and backslashes, as in these examples for a CAS called **CAS01**. + +```none +CAS01\Rpc (Default Web Site) +CAS01\mapi (Default Web Site) +CAS01\Autodiscover (Default Web Site) +``` + +Repeat these commands on each CAS in your deployment: + +1. Working on the CAS, log in to PowerShell under an account with administrative privileges. + +2. Open the Start menu and run the Exchange Management Shell, which is a terminal window. + +3. 
Configure the external hostname for Outlook Anywhere. + + ```none + C:\> Set-OutlookAnywhere -ExternalHostname mail.company.com + ``` + +4. Configure the desired authentication methods for Outlook Anywhere. This sample command configures [HTTP Basic authentication](https://tools.ietf.org/html/rfc7617). + + ```none + C:\> Set-OutlookAnywhere -ExternalClientsRequireSsl 1 + -DefaultAuthenticationMethod basic + -ExternalClientAuthenticationMethod basic + -IISAuthenticationMethods basic + -InternalClientAuthenticationMethod basic + ``` + +5. Configure the desired authentication methods for Autodiscover. This sample command configures HTTP Basic authentication. + + ```none + C:\> Set-AutodiscoverVirtualDirectory + -LiveIdNegotiateAuthentication 0 + -WSSecurityAuthentication 0 -LiveIdBasicAuthentication 0 + -BasicAuthentication 1 -DigestAuthentication 0 + -WindowsAuthentication 0 -OAuthAuthentication 0 + -AdfsAuthentication 0 + ``` + +6. Configure the desired authentication methods for Offline Address Book (OAB). This sample command configures HTTP Basic authentication. + + ```none + C:\> Set-OabVirtualDirectory -WindowsAuthentication 0 + -BasicAuthentication 1 + -ExternalUrl https://mail.company.com/OAB + ``` + +7. If Exchange 2013 Service Pack 1 (SP 1) or later is installed, configure the desired authentication methods for MAPI over HTTP. This sample command configures HTTP Basic authentication. (Note that MAPI over HTTP is not available in Exchange 2013 without SP 1.) + + ```none + C:\> Set-MapiVirtualDirectory + -InternalURL http://mail.company.com/mapi + -ExternalURL https://mail.company.com/mapi + -IISAuthenticationMethods Basic + ``` + +8. If Exchange 2013 SP 1 or later is installed, enable MAPI Over HTTP. 
+ + ```none + C:\> Set-OrganizationConfig -MapiHTTPEnabled + ``` + + +### Configuring Firewalls + +If there is a firewall between the NGINX Plus server and other applications in your Exchange deployment, configure it to pass through traffic on the ports specified in the table. The columns represent the three types of applications that communicate with the NGINX Plus server – email clients, the NGINX Plus live activity monitoring dashboard on your administrative network, and CASs – and the **x** indicates that the port must be open. + + +{{}} + +|**Port** | **Protocol** | **Email clients** | **Admin network** | **CASs** | +| ---| ---| ---| ---| --- | +|25 | SMTP | x | x | x | +|80 | HTTP | x | | | +|443 | HTTPS | x | x | | +|993 | IMAPS | x | x | | +|8080 | HTTP (NGINX Plus dashboard) | x | | | + +{{}} + + + +## Configuring an SSL/TLS Certificate for Client Traffic + +To enable SSL/TLS encryption of traffic between NGINX Plus and Exchange clients, you need to configure a server certificate for NGINX Plus. + +There are several ways to obtain the required certificate, including the following. For your convenience, step-by-step instructions are provided for the second and third options. + +- If you already have an SSL/TLS server certificate installed on another UNIX or Linux system (including systems running NGINX Open Source, NGINX Plus, or Apache HTTP Server), copy it to the **/etc/nginx/ssl** directory on the NGINX Plus server. +- Request a new certificate from a certificate authority (CA) or your organization's security group, as described in [Generating a Certificate Request with the openssl Command](#certificate-request). +- If you already have an SSL/TLS certificate on a Windows system, see [Exporting and Converting an SSL/TLS Certificate from an IIS Server](#certificate-iis). + + +### Generating a Certificate Request with the openssl Command + +1. Log in as the root user on a machine that has the `openssl` software installed. + +2. 
Create a private key to be packaged in the certificate. + + ```shell + root# openssl genrsa -out ~/company.com.key 2048 + ``` + +3. Create a backup of the key file in a secure location. If you lose the key, the certificate becomes unusable. + + ```shell + root# cp ~/company.com.key /company.com.key.backup + ``` + +4. Create a Certificate Signing Request (CSR) file. + + ```shell + root# openssl req -new -sha256 -key ~/company.com.key -out ~/company.com.csr + ``` + +5. Request a certificate from a CA or your internal security group, providing the CSR file (**company.com.csr**). As a reminder, never share private keys (**.key** files) directly with third parties. + + The certificate needs to be PEM format rather than in the Windows‑compatible PFX format. If you request the certificate from a CA website yourself, choose NGINX or Apache (if available) when asked to select the server platform for which to generate the certificate. + +6. Copy or move the certificate file and associated key files to the **/etc/nginx/ssl** directory on the NGINX Plus server. + + In the configuration files for Exchange load balancing that you can download from the NGINX website, the filenames for the certificate and private key are **company.com.crt** and **company.com.key**. For a discussion of the file and download instructions, see [Creating and Modifying Configuration Files](#config-files). + + +### Exporting and Converting an SSL/TLS Certificate from an IIS Server + +On Windows systems, SSL/TLS certificates are packaged in a Public‑Key Cryptography Standards (PKCS) archive file with extension **.pfx**. You need to export the **.pfx** file and convert the contents to the Linux‑compatible PEM format. + +Working in the Microsoft Management Console, perform these steps: + +1. Open the **Certificates** snap‑in. + +2. 
In the left‑hand navigation pane, click the **Certificates** folder in the logical store for the certificate you want to export (in the following figure, it is Personal > Certificates). + +3. In the main pane, right‑click the certificate to be exported (in the following figure, it is **cas01.company.com**). + +4. On the menu that pops up, select **All Tasks**, then click **Export**. + + ![Certificates snap-in to Microsoft Management Console, used to export SSL/TLS certificate](/nginx/images/oracle-ebs-iis-certlm.png) + +5. In the Certificate Export Wizard window that pops up, click **Yes, export the private key**. (This option appears only if the private key is marked as exportable and you have access to it.) + +6. If prompted for a password (used to encrypt the **.pfx** file before export), type it in the **Password** and **Confirm** fields. (Remember the password, as you need to provide it when importing the bundle to NGINX Plus.) + +7. Click **Next**. + +8. In **File name** field, type the filename and path to the location for storing the exported file (certificate and private key). Click **Next**, then **Finish**. + +9. Copy the **.pfx** file to the NGINX Plus server. + +Working on the NGINX Plus server (which must have the `openssl` software installed), perform these steps: + +1. Log in as the root user. + +2. Extract the private key file from the **.pfx** file. You are prompted first for the password protecting the **.pfx** file (see Step 6 above), then for a new password used to encrypt the private key file being created (**company.com.key.encrypted** in the following sample command). + + ```shell + root# openssl pkcs12 -in exported-certs.pfx -nocerts -out company.com.key.encrypted + ``` + +3. Decrypt the key file. At the prompt, type the password you created in the previous step for the private key file. + + ```shell + root# openssl rsa -in company.com.key.encrypted -out company.com.key + ``` + +4. Extract the certificate file. 
+ + ```shell + root# openssl pkcs12 -in exported-cert.pfx -clcerts -nokeys -out company.com.crt + ``` + +5. Copy or move the certificate file and associated key files to the **/etc/nginx/ssl** directory on the NGINX Plus server. + + In the configuration files for Exchange load balancing that you can download from the NGINX website, the filenames for the certificate and private key are **company.com.crt** and **company.com.key**. For a discussion of the file and download instructions, see [Creating and Modifying Configuration Files](#config-files). + + +## Configuring NGINX Plus + +You can set up NGINX Plus as your Exchange load balancer in either a basic or enhanced configuration. Basic configuration provides complete load‑balancing and reverse‑proxy functions. The enhanced configuration adds the following features to make your deployment more reliable and easier to manage: + +- Fine‑grained URL location control – Exchange CASs interact with the various applications used by clients on different types of devices. Creating a separate `location` block for each application isolates the effect of an application outage to users of that application only. Other applications on the CAS continue to run normally. +- Health checks – Exchange includes a health‑check mechanism for several applications that integrates easily with NGINX Plus health checks. +- Live activity monitoring – NGINX Plus includes a dashboard that provides key load and performance metrics in real time, including TCP metrics in NGINX Plus R6 and later. + +For more information about these features in an Exchange load balancer, see [Completing the Configuration of Enhanced Load Balancing](#config-enhanced). 
+ +The instructions in these sections are mandatory: + +- [Installing NGINX Plus](#installing) +- [Creating and Modifying Configuration Files](#config-files) +- [Configuring Global Settings](#global-settings) +- [Configuring TCP Load Balancing](#tcp-load-balancing) +- [Configuring Global HTTP and HTTPS Settings](#global-http) +- [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers) + +Optionally, you can enable support for HTTP/2 in [Configuring HTTP/2 Support](#http2). + + +### Installing NGINX Plus + +You can install NGINX Plus on premises, in a private cloud, or in a public cloud such as the Amazon Elastic Compute Cloud (EC2), the Google Cloud Platform, or Microsoft Azure. See the instructions for your installation type: + +- On‑premises or private cloud – [Installing NGINX Plus]({{< relref "../../admin-guide/installing-nginx/installing-nginx-plus.md" >}}) +- Amazon EC2 – [Installing NGINX Plus AMIs on Amazon EC2]({{< relref "../../admin-guide/installing-nginx/installing-nginx-plus-amazon-web-services.md" >}}) +- Google Compute Cloud – [Installing NGINX Plus on the Google Cloud Platform]({{< relref "../../admin-guide/installing-nginx/installing-nginx-plus-google-cloud-platform.md" >}}) +- Microsoft Azure – [Installing NGINX Plus on Microsoft Azure]({{< relref "../../admin-guide/installing-nginx/installing-nginx-plus-microsoft-azure.md" >}}) + + +### Creating and Modifying Configuration Files + +To reduce errors, this guide has you copy directives from files provided by NGINX into your configuration files, instead of using a text editor to type in the directives yourself. Then you go through the sections in this guide (starting with [Configuring Global Settings](#global-settings)) to learn how to modify the directives as required for your deployment. + +As provided, there is one file for basic load balancing and one file for enhanced load balancing. 
If you are installing and configuring NGINX Plus on a fresh Linux system and using it only to load balance Exchange traffic, you can use the provided file as your main NGINX Plus configuration file, which by convention is called **/etc/nginx/nginx.conf**. + +We recommend, however, that instead of a single configuration file you use the scheme that is set up automatically when you install an NGINX Plus package – especially if you already have an existing NGINX Open Source or NGINX Plus deployment or plan to expand your use of NGINX Plus to other purposes in future. In the conventional scheme, the main configuration file is still called **/etc/nginx/nginx.conf**, but instead of including all directives in it, you create separate configuration files for different functions and store the files in the **/etc/nginx/conf.d** directory. You then use the [include](https://nginx.org/en/docs/ngx_core_module.html#include) directive in the appropriate contexts of the main file to read in the contents of the function‑specific files. + +To download the complete configuration file for basic or enhanced load balancing from the NGINX website, run the appropriate commands: + +```none +root# cd /etc/nginx/conf.d +root# curl https://www.nginx.com/resource/conf/exchange-basic.conf > exchange-basic.conf +``` + +or + +```none +root# cd /etc/nginx/conf.d +root# curl https://www.nginx.com/resource/conf/exchange-enhanced.conf > exchange-enhanced.conf +``` + +(You can also access the URL in a browser and download the file.) + +To set up the conventional configuration scheme, perform these steps: + +1. In the main **nginx.conf** file, add `http` and `stream` configuration blocks, if they do not already exist. (The standard placement is below any global directives; see [Configuring Global Settings](#global-settings).) Add the indicated `include` directives (you can change the filenames if you wish). 
+ + ```nginx + http { + include conf.d/exchange-http.conf; + } + + stream { + include conf.d/exchange-stream.conf; + } + ``` + + Directive documentation: [include](https://nginx.org/en/docs/ngx_core_module.html#include) + + You can also use wildcard notation to read all function‑specific files for either HTTP or TCP traffic into the appropriate context block. For example, if you name all HTTP configuration files _function_-http.conf and all TCP configuration files _function_-stream.conf (the filenames we specify in this section conform to this pattern), the wildcarded `include` directives are: + + ```nginx + http { + include conf.d/*-http.conf; + } + + stream { + include conf.d/*-stream.conf; + } + ``` + +2. In the **/etc/nginx/conf.d** directory, create a new file called exchange-http.conf for directives that pertain to Exchange HTTP and HTTPS traffic (or substitute the name you chose in Step 1). Copy in the directives from the `http` configuration block in the downloaded configuration file. Remember not to copy the first line (`http` `{`) or the closing curly brace (`}`) for the block, because the `http` block you created in Step 1 already has them. + +3. Also in the **/etc/nginx/conf.d** directory, create a new file called exchange-stream.conf for directives that pertain to Exchange TCP traffic (or substitute the name you chose in Step 1). Copy in the directives from the `stream` configuration block in the downloaded configuration file. Again, do not copy the first line (`stream` `{`) or the closing curly brace (`}`). + +For reference purposes, the text of the full configuration files is included in this document: + +- [Full Configuration for Basic Load Balancing](#full-configuration-basic) +- [Full Configuration for Enhanced Load Balancing](#full-configuration-enhanced) + +We recommend, however, that you do not copy text directly from this document.
It does not necessarily use the same mechanisms for positioning text (such as line breaks and white space) as text editors do. In text copied into an editor, lines might run together and indenting of child statements in configuration blocks might be missing or inconsistent. The absence of formatting does not present a problem for NGINX Plus, because (like many compilers) it ignores white space during parsing, relying solely on semicolons and curly braces as delimiters. The absence of white space does, however, make it more difficult for humans to interpret the configuration and modify it without making mistakes. + + +#### About Reloading Updated Configuration + +We recommend that each time you complete a set of updates to the configuration, you run the `nginx -t` command to test the configuration file for syntactic validity. + +```none +root# nginx -t +nginx: the configuration file /etc/nginx/nginx.conf syntax is ok +nginx: configuration file /etc/nginx/nginx.conf test is successful +``` + +To tell NGINX Plus to start using the new configuration, run one of the following commands: + +```none +root# nginx -s reload +``` + +or + +```none +root# service nginx reload +``` + + +### Configuring Global Settings + +Verify that the main **nginx.conf** file includes these global directives, adding them as necessary. + +```nginx +user nginx; +worker_processes auto; +error_log /var/log/nginx/error.log info; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +# If using the standard configuration scheme, the 'http' and 'stream' blocks are +# usually placed here and enclose 'include' directives that refer to files in +# the conf.d directory. 
+``` + +Directive documentation: [error_log](https://nginx.org/en/docs/ngx_core_module.html#error_log), [events](https://nginx.org/en/docs/ngx_core_module.html#events), [pid](https://nginx.org/en/docs/ngx_core_module.html#pid), [user](https://nginx.org/en/docs/ngx_core_module.html#user), [worker_connections](https://nginx.org/en/docs/ngx_core_module.html#worker_connections), [worker_processes](https://nginx.org/en/docs/ngx_core_module.html#worker_processes) + + +### Configuring TCP Load Balancing + +The directives in the top‑level `stream` configuration block configure TCP load balancing, and are the same for both basic and enhanced load balancing. Modify them as indicated: + +1. In the `upstream` block that defines the group of load‑balanced Internet Message Access Protocol (IMAP) servers, include a `server` directive for each of your CASs. For information about the `zone` directive, see the discussion at the end of this section. + + ```nginx + # In the 'stream' block + upstream exchange-imaps { + zone exchange-imaps 64k; + server 10.0.0.237:993; # Replace with IP address of a CAS + server 10.0.0.238:993; # Replace with IP address of a CAS + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#server), [upstream](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#zone) + +2. In the `upstream` block that defines the group of load‑balanced Simple Mail Transfer Protocol (SMTP) servers, include a `server` directive for each of your CASs. + + ```nginx + # In the 'stream' block + upstream exchange-smtp { + zone exchange-smtp 64k; + server 10.0.0.237:25; # Replace with IP address of a CAS + server 10.0.0.238:25; # Replace with IP address of a CAS + } + ``` + +3. This `server` block defines the virtual server that proxies traffic on port 993 to the exchange-imaps upstream group configured in Step 1.
+ + ```nginx + # In the 'stream' block + server { + listen 993; + status_zone exchange-imaps; + proxy_pass exchange-imaps; + } + ``` + + Directive documentation: [listen](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#listen), [proxy_pass](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_pass), [server](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#server), [status_zone](https://nginx.org/en/docs/http/ngx_http_status_module.html#status_zone) + +4. This `server` block defines the virtual server that proxies traffic on port 25 to the exchange-smtp upstream group configured in Step 2. If you wish to change the port number from 25 (for example, to 587), change the `listen` directive. + + ```nginx + # In the 'stream' block + server { + listen 25; # SMTP port can be changed here (to 587, for example) + status_zone exchange-smtp; + proxy_pass exchange-smtp; + } + ``` + +The `status_zone` directives create an entry for each virtual server on the **TCP Zones** tab of the NGINX Plus live activity monitoring dashboard, which you activate as part of [enhanced load balancing](#config-enhanced). We recommend including the directives even in a basic configuration, in case you decide to enable monitoring in the future. + +Similarly, the `zone` directives create an entry for each upstream group on the **TCP Upstreams** tab of the NGINX Plus live activity monitoring dashboard. They also create a shared memory zone for storing configuration and run‑time state information about the servers in the upstream group, which the `nginx` worker processes on the NGINX Plus host use to improve proxying and load‑balancing efficiency. + + +### Configuring Global HTTP and HTTPS Settings + +These directives in the top‑level `http` configuration block configure global HTTP and HTTPS settings. Modify them as indicated: + +1. 
These directives define the file in which access events are logged, and modify the default format of access log messages to include the `$upstream_addr` variable, which captures the address of the CAS. + + ```nginx + # In the 'http' block + log_format main '$remote_addr - $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_user_agent" "$upstream_addr"'; + access_log /var/log/nginx/access.log main; + ``` + + Directive documentation: [access_log](https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log), [log_format](https://nginx.org/en/docs/http/ngx_http_log_module.html#log_format) + +2. These directives set the duration of the indicated timeouts to 3 hours each, to support HTTP long polling by Exchange clients. + + ```nginx + # In the 'http' block + keepalive_timeout 3h; + proxy_read_timeout 3h; + ``` + + Directive documentation: [keepalive_timeout](https://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout), [proxy_read_timeout](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout) + +3. The `tcp_nodelay` directive enables use of the operating system's `TCP_NODELAY` option. (This option disables the aggregating of many small messages into a larger one, which is often done to reduce the number of packets being sent on the network.) + + ```nginx + # In the 'http' block + tcp_nodelay on; + ``` + + Directive documentation: [tcp_nodelay](https://nginx.org/en/docs/http/ngx_http_core_module.html#tcp_nodelay) + + +### Configuring Virtual Servers for HTTP and HTTPS Traffic + +These directives define virtual servers for HTTP and HTTPS traffic in the top‑level `http` configuration block. + +1. Configure a `server` block that defines the port for HTTPS traffic (443) and enables NGINX Plus to accept client traffic encrypted with SSL/TLS. 
+ + ```nginx + # In the 'http' block + server { + listen 443 ssl; + status_zone exchange-combined; + } + ``` + + Directive documentation: [listen](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#listen), [server](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#server), [status_zone](https://nginx.org/en/docs/http/ngx_http_status_module.html#status_zone) + + The `status_zone` directive creates an entry for the virtual server on the **Server zones** tab of the NGINX Plus live activity monitoring dashboard, which you activate as part of [enhanced load balancing](#config-enhanced). We recommend including the directive even in a basic configuration, in case you decide to enable monitoring in the future. + +2. This directive increases the default file upload size, which is required for Microsoft RPC Over HTTP. (Note that the directive does not set the maximum size of an email message.) + + ```nginx + # In the 'server' block for HTTPS traffic + client_max_body_size 2G; + ``` + + Directive documentation: [client_max_body_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size) + +3. These directives name the SSL certificate and private key, and disable use of any protocol less secure than TLS version 1. + + ```nginx + # In the 'server' block for HTTPS traffic + ssl_certificate /etc/nginx/ssl/company.com.crt; + ssl_certificate_key /etc/nginx/ssl/company.com.key; + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ``` + + Directive documentation: [ssl_certificate](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate), [ssl_certificate_key](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate_key), [ssl_protocols](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols) + +4. 
This location block redirects traffic from the main mail page (corresponding to **/**) to the Outlook Web App (OWA), which IIS does not do by default: + + ```nginx + # In the 'server' block for HTTPS traffic + location = / { + return 301 "/owa/"; + } + ``` + + Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [return](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#return) + +5. (Optional) If a browser requests the **favicon.ico** file and it is not available, this `location` block disables logging of any resulting errors and supplies an empty image file. Many applications provide a **favicon.ico** file containing the icon that appears on a page's tab in the browser to indicate the application. IIS by default does not make the **favicon.ico** file available for the main mail page. + + ```nginx + # In the 'server' block for HTTPS traffic + location = /favicon.ico { + empty_gif; + access_log off; + } + ``` + + Directive documentation: [access_log](https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log), [empty_gif](https://nginx.org/en/docs/http/ngx_http_empty_gif_module.html#empty_gif) + +6. Configure a `server` block that permanently redirects requests received on port 80 to the HTTPS server, which is defined in Step 1 above. + + ```nginx + # In the 'http' block + server { + listen 80; + location / { + return 301 https://$host$request_uri; + } + } + ``` + + Directive documentation: [listen](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#listen), [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [return](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#return), [server](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#server) + + +### Configuring HTTP/2 Support + +HTTP/2 is fully supported in NGINX Plus R7 and later (as well as NGINX 1.9.5 and later).
As always, we recommend you run the latest version of software to take advantage of improvements and bug fixes. + +In NGINX Plus R8 and later, NGINX Plus supports HTTP/2 by default, and does not support SPDY: + +- In NGINX Plus R11 and later, the nginx-plus package continues to support HTTP/2 by default, but the nginx-plus-extras package available in previous releases is deprecated in favor of [dynamic modules](https://www.nginx.com/products/nginx/dynamic-modules/). + +- For NGINX Plus R8 through R10, the nginx-plus and nginx-plus-extras packages support HTTP/2 by default. + +If using NGINX Plus R7, you must install the nginx-plus-http2 package instead of the nginx-plus or nginx-plus-extras package. + +To enable HTTP/2 support, add the `http2` directive in the `server` block for HTTPS traffic that we created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers), so that it looks like this: + +```nginx +# In the 'server' block for HTTPS traffic +listen 443 ssl; +http2 on; +``` + +Directive documentation: [http2](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2) + +To verify that HTTP/2 translation is working, you can use the "HTTP/2 and SPDY indicator" plug‑in available for [Google Chrome](https://chrome.google.com/webstore/detail/http2-and-spdy-indicator/mpbpobfflnpcgagjijhmgnchggcjblin?hl=en) and [Firefox](https://addons.mozilla.org/en-US/firefox/addon/http2-indicator/). + + +## Completing the Configuration of Load Balancing + +The preceding instructions in [Configuring NGINX Plus](#config) are required for both basic and enhanced load balancing. At this point, you proceed to the section for the final desired configuration: + +- [Completing the Configuration of Basic Load Balancing](#config-basic) +- [Completing the Configuration of Enhanced Load Balancing](#config-enhanced) + + +### Completing the Configuration of Basic Load Balancing + +These directives complete the configuration for basic load balancing of Exchange traffic.
(To finalize enhanced load balancing instead, proceed to [Completing the Configuration of Enhanced Load Balancing](#config-enhanced).) + +1. In the `upstream` block that defines the group of servers across which NGINX Plus load balances HTTPS traffic, include a `server` directive for each of your CASs. With NGINX Plus R7 and later, you can include the `ntlm` directive to use Microsoft NT LAN Manager for authentication. + + ```nginx + # In the 'http' block + upstream exchange { + zone exchange-general 64k; + ntlm; + + server 10.0.0.237:443; # Replace with IP address of a CAS + server 10.0.0.238:443; # Replace with IP address of a CAS + sticky learn create=$remote_addr lookup=$remote_addr + zone=client_sessions:10m timeout=3h; + } + ``` + + Directive documentation: [ntlm](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ntlm), [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [`sticky learn`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +2. In the `server` block for HTTPS traffic (created in [Configuring a Virtual Server for HTTPS Traffic](#https)), this `location` block disables buffering of both uploads and downloads, as required by Microsoft RPC Over HTTP. 
+ + ```nginx + # In the 'server' block for HTTPS traffic + location / { + proxy_pass https://exchange; + proxy_buffering off; + proxy_http_version 1.1; + proxy_request_buffering off; + proxy_set_header Connection "Keep-Alive"; + } + ``` + + Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering), [proxy_http_version](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass),[proxy_request_buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering), [proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) + + +### Full Configuration for Basic Load Balancing + +```nginx +user nginx; +worker_processes auto; +error_log /var/log/nginx/error.log info; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +# If placing all directives in the main nginx.conf file, copy the following +# 'http' block into it, including the initial and final lines which open +# and close the 'http' context. + +# If creating a separate conf.d/exchange-http.conf file, either exclude the +# initial and final lines from the copied region, or copy them but comment +# them out in the exchange-http.conf file. 
+ +http { + log_format main '$remote_addr - $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_user_agent" "$upstream_addr"'; + access_log /var/log/nginx/access.log main; + keepalive_timeout 3h; + proxy_read_timeout 3h; + tcp_nodelay on; + + upstream exchange { + zone exchange-general 64k; + ntlm; + + server 10.0.0.237:443; # Replace with IP address of a CAS + server 10.0.0.238:443; # Replace with IP address of a CAS + sticky learn create=$remote_addr lookup=$remote_addr + zone=client_sessions:10m timeout=3h; + } + + server { + listen 80; + + location / { + return 301 https://$host$request_uri; + } + } + + server { + listen 443 ssl; + http2 on; + client_max_body_size 2G; + ssl_certificate /etc/nginx/ssl/company.com.crt; + ssl_certificate_key /etc/nginx/ssl/company.com.key; + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + status_zone exchange-combined; + + location = / { + return 301 "/owa/"; + } + + location = /favicon.ico { + empty_gif; + access_log off; + } + + location / { + proxy_pass https://exchange; + proxy_buffering off; + proxy_http_version 1.1; + proxy_request_buffering off; + proxy_set_header Connection "Keep-Alive"; + } + } +} + +# If placing all directives in the main nginx.conf file, copy the following +# 'stream' block into it, including the initial and final lines which open +# and close the 'stream' context. + +# If using a separate conf.d/exchange-stream.conf file, either exclude +# the initial and final lines from the copied region, or copy them but +# comment them out in the exchange-stream.conf file. 
+ +stream { + upstream exchange-imaps { + zone exchange-imaps 64k; + + server 10.0.0.237:993; # Replace with IP address of a CAS + server 10.0.0.238:993; # Replace with IP address of a CAS + } + + upstream exchange-smtp { + zone exchange-smtp 64k; + + server 10.0.0.237:25; # Replace with IP address of a CAS + server 10.0.0.238:25; # Replace with IP address of a CAS + } + + server { + listen 993; + status_zone exchange-imaps; + proxy_pass exchange-imaps; + } + + server { + listen 25; # SMTP port can be changed here (to 587, for example) + status_zone exchange-smtp; + proxy_pass exchange-smtp; + } +} +``` + + +### Completing the Configuration of Enhanced Load Balancing + +This section describes the configuration for enhanced load balancing of Exchange traffic. The enhancements improve the performance of your NGINX Plus deployment and make it easier to manage. + +(To finalize basic load balancing instead, return to [Completing the Configuration of Basic Load Balancing](#config-basic).) + +- [Configuring Application‑Specific Load Balancing](#app-specific-load-balancing) +- [Enabling the Live Activity Monitoring Dashboard](#monitoring) + + +#### Configuring Application-Specific Load Balancing + +Exchange CASs interact with various applications used by clients on different types of devices. The clients access virtual directories and URIs specific to their application. To improve the performance of applications and of NGINX Plus, configure a separate `location` block for each application to enable the features listed in the table. 
+ + +{{}} + +|**Application** | **Virtual Directory or URI** | **Enhanced Features** | +| ---| ---| --- | +|ActiveSync | **/Microsoft-Server-ActiveSync** | Health checks | +|Exchange Control Panel | **/ecp** | Restricted access and health checks | +|MAPI over HTTP | **/mapi** | Health checks | +|OWA | **/owa** | Health checks | +|RPC Over HTTP | **/rpc/rpcproxy.dll** | Unbuffered upload and download; session persistence | + +{{}} + + + + + + +##### Configuring Granular URL Location Control + +1. In the top‑level `http` configuration block, there is a separate `upstream` block for each application, which improves Exchange's overall reliability by isolating the effect of outages to just the affected application. (In other words, creating separate upstream groups means that if an application or directory becomes unavailable, clients can still access the other applications and directories that are functioning and accessible.) + + In each `upstream` block, include a `server` directive for each of your CASs. With NGINX Plus R7 and later, you can include the `ntlm` directive to use Microsoft NT LAN Manager for authentication.
+ + ```nginx + # In the 'http' block + upstream exchange { + zone exchange-general 64k; + ntlm; + + server 10.0.0.237:443; # Replace with IP address of a CAS + server 10.0.0.238:443; # Replace with IP address of a CAS + } + + upstream exchange-activesync { + zone exchange-activesync 64k; + ntlm; + + server 10.0.0.237:443; # Replace with IP address of a CAS + server 10.0.0.238:443; # Replace with IP address of a CAS + } + + upstream exchange-ecp { + zone exchange-ecp 64k; + ntlm; + + server 10.0.0.237:443; # Replace with IP address of a CAS + server 10.0.0.238:443; # Replace with IP address of a CAS + } + + upstream exchange-mapi { + zone exchange-mapi 64k; + ntlm; + + server 10.0.0.237:443; # Replace with IP address of a CAS + server 10.0.0.238:443; # Replace with IP address of a CAS + } + + upstream exchange-owa { + zone exchange-owa 64k; + ntlm; + + server 10.0.0.237:443; # Replace with IP address of a CAS + server 10.0.0.238:443; # Replace with IP address of a CAS + } + + upstream exchange-rpc { + zone exchange-rpc 64k; + ntlm; + + server 10.0.0.237:443; # Replace with IP address of a CAS + server 10.0.0.238:443; # Replace with IP address of a CAS + sticky learn create=$remote_addr lookup=$remote_addr + zone=client_sessions:10m timeout=3h; + } + ``` + + Directive documentation: [ntlm](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ntlm), [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +2. In the `server` block for HTTPS traffic (created in [Configuring a Virtual Server for HTTPS Traffic](#https)), a separate `location` block for each client application configures different handling of each type of traffic: + + - Clients that don't specify an application access the main page. 
+ + ```nginx + # In the 'server' block for HTTPS traffic + location / { + proxy_pass https://exchange; + proxy_http_version 1.1; + proxy_set_header Connection ""; + } + ``` + + Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_http_version](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) + + - Administrators using the Exchange Control Panel (ECP) access **/ecp**. Presumably you want to restrict access to this location, and one of the simplest ways is to uncomment the `allow` and `deny` directives, which allow access from your administrative network (substitute its IP address and prefix for 172.16.0.0/16) and deny access to everyone else. You could also use other security methods, like SSL certificates or an additional layer of HTTP Basic authentication. + + ```nginx + # In the 'server' block for HTTPS traffic + location /ecp { + #allow 172.16.0.0/16; # Replace with your admin network + #deny all; + proxy_pass https://exchange-ecp; + proxy_http_version 1.1; + proxy_set_header Connection ""; + } + ``` + + Directive documentation: [allow](https://nginx.org/en/docs/http/ngx_http_access_module.html#allow), [deny](https://nginx.org/en/docs/http/ngx_http_access_module.html#deny) + + - Outlook 2013 SP1 clients using MAPI Over HTTP access **/mapi**. + + ```nginx + # In the 'server' block for HTTPS traffic + location /mapi { + proxy_pass https://exchange-mapi; + proxy_http_version 1.1; + proxy_set_header Connection ""; + } + ``` + + - Mobile clients like iPhone and Android access the ActiveSync location (/Microsoft-Server-ActiveSync). 
+ + ```nginx + # In the 'server' block for HTTPS traffic + location /Microsoft-Server-ActiveSync { + proxy_pass https://exchange-activesync; + proxy_http_version 1.1; + proxy_set_header Connection ""; + } + ``` + + - Clients using a browser for webmail access the OWA location (**/owa**). + + ```nginx + # In the 'server' block for HTTPS traffic + location /owa { + proxy_pass https://exchange-owa; + proxy_http_version 1.1; + proxy_set_header Connection ""; + } + ``` + + - Outlook Anywhere clients access the RPC Over HTTP location (**/rpc/rpcproxy.dll**). The two additional directives disable buffering for both upload and download of content, as required by RPC Over HTTP. + + ```nginx + # In the 'server' block for HTTPS traffic + location /rpc/rpcproxy.dll { + proxy_pass https://exchange-rpc; + proxy_buffering off; + proxy_http_version 1.1; + proxy_request_buffering off; + proxy_set_header Connection "Keep-Alive"; + } + ``` + + Directive documentation: [proxy_buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering), [proxy_request_buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering) + + +##### Configuring Application-Specific Health Checks + +For several applications, Exchange includes a health‑check mechanism that you can easily integrate with the health‑check feature in NGINX Plus. Specifically, you configure the NGINX Plus health check to succeed when the Exchange health check succeeds. + +When an Exchange health check succeeds for an application, the following lines are written at the end of the application's **healthcheck.htm** file. 
+ +```none +200 OK +server-name.FQDN +``` + +Here's an example of the full contents of a **healthcheck.htm** file for the MAPI application: + +```none +root# curl -v https://mail.company.com/mapi/healthcheck.htm +> GET /mapi/healthcheck.htm HTTP/1.1 +> User-Agent: curl/7.37.1 +> Host: mail.company.com +> Accept: */* +> +< HTTP/1.1 200 OK +* Server nginx/1.7.11 is not blacklisted +< Server: nginx/1.7.11 +< Date: Thu, 02 Apr 2015 00:36:34 GMT +< Content-Length: 34 +< Connection: keep-alive +< X-Powered-By: ASP.NET +< X-FEServer: CAS02 +< +200 OK +CAS02.CORP.Company.com +``` + +These directives configure NGINX Plus health checks. + +1. In the `server` block for HTTPS traffic (described in [Configuring a Virtual Server for HTTPS Traffic](#https)), this `match` block checks for status code `200` and the string `200 OK` in the response body. + + ```nginx + # In the 'server' block for HTTPS traffic + match exchange-health { + status 200; + body ~ "200 OK"; + } + ``` + + Directive documentation: [match](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#match) + +2. 
In these `location` blocks (described in their basic form in [Configuring Granular URL Location Control](#url-control)), the addition of `health_check` directives configures NGINX Plus health checks: + + ```nginx + # In the 'server' block for HTTPS traffic location + location /ecp { + #allow 172.16.0.0/16; # Replace with your admin network + #deny all; + proxy_pass https://exchange-ecp; + proxy_http_version 1.1; + proxy_set_header Connection ""; + health_check uri=/ecp/healthcheck.htm interval=3s + match=exchange-health; + } + + location /mapi { + proxy_pass https://exchange-mapi; + proxy_http_version 1.1; + proxy_set_header Connection ""; + health_check uri=/mapi/healthcheck.htm interval=3s + match=exchange-health; + } + + location /owa { + proxy_pass https://exchange-owa; + proxy_http_version 1.1; + proxy_set_header Connection ""; + health_check uri=/owa/healthcheck.htm interval=3s + match=exchange-health; + } + ``` + + Directive documentation: [health_check](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check), [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_http_version](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) + +3. Together, this `match` configuration block (in the top‑level `http` context) and this `health_check` directive (added to the existing `location` block for Outlook Anywhere clients) direct RPC traffic away from servers that don't have HTTP Basic authentication enabled. 
+ + ```nginx + # In the 'http' block + match exchange-auth { + status 401; + header WWW-Authenticate ~ Basic; + } + + # In the 'server' block for HTTPS traffic + location /rpc/rpcproxy.dll { + proxy_pass https://exchange-rpc; + proxy_buffering off; + proxy_request_buffering off; + proxy_http_version 1.1; + proxy_set_header Connection "Keep-Alive"; + health_check uri=/rpc/rpcproxy.dll interval=3s + match=exchange-auth; + } + ``` + + Directive documentation: [proxy_buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering), [proxy_request_buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering) + + +#### Enabling the Live Activity Monitoring Dashboard + +NGINX Plus includes a live activity monitoring interface that provides key load and performance metrics in real time, including TCP metrics in NGINX Plus R6 and later. Statistics are reported through a RESTful JSON interface, making it very easy to feed the data to a custom or third‑party monitoring tool. There is also a built‑in dashboard in NGINX Plus. Follow these instructions to deploy it. + +Dashboard tab in NGINX Plus live activity monitoring dashboard + +For more information about live activity monitoring, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/monitoring/live-activity-monitoring.md" >}}). + +The quickest way to configure the module and the built‑in dashboard is to download the sample configuration file from the NGINX website, and modify it as necessary. For more complete instructions, see [Live Activity Monitoring of NGINX Plus in 3 Simple Steps](https://www.nginx.com/blog/live-activity-monitoring-nginx-plus-3-simple-steps/). + +1. Download the **status.conf** file to the NGINX Plus server: + + ```none + # cd /etc/nginx/conf.d + # curl https://www.nginx.com/resource/conf/status.conf > status.conf + ``` + +2. 
Read in **status.conf** at the top‑level `http` configuration block in the main **nginx.conf** file: + + ```nginx + # In the 'http' block + include conf.d/status.conf; + ``` + + If you are using the conventional configuration scheme and your existing `include` directives use the wildcard notation discussed in [Creating and Modifying Configuration Files](#config-files), you can either add a separate `include` directive for **status.conf** as shown above, or change the name of **status.conf** so it is captured by the wildcard in an existing `include` directive in the `http` block. For example, changing it to status-http.conf means it is captured by the `include` directive for `*-http.conf`. + + Directive documentation: [include](https://nginx.org/en/docs/ngx_core_module.html#include) + +3. Comments in **status.conf** explain which directives you must customize for your deployment. In particular, the default settings in the sample configuration file allow anyone on any network to access the dashboard. We strongly recommend that you restrict access to the dashboard with one or more of the following methods: + + - **IP address‑based access control lists (ACLs)**. In the sample configuration file, uncomment the `allow` and `deny` directives, and substitute the address of your administrative network for 10.0.0.0/8. Only users on the specified network can access the status page. + + ```nginx + allow 10.0.0.0/8; + deny all; + ``` + + Directive documentation: [allow](https://nginx.org/en/docs/http/ngx_http_access_module.html#allow), [deny](https://nginx.org/en/docs/http/ngx_http_access_module.html#deny) + + - **HTTP Basic authentication** as defined in [RFC 7617](https://tools.ietf.org/html/rfc7617). In the sample configuration file, uncomment the `auth_basic` and `auth_basic_user_file` directives and add user entries to the **/etc/nginx/users** file (for example, by using an **htpasswd** generator). 
If you have an Apache installation, another option is to reuse an existing **htpasswd** file. + + ```nginx + auth_basic on; + auth_basic_user_file /etc/nginx/users; + ``` + + Directive documentation: [auth_basic](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic), [auth_basic_user_file](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic_user_file) + + - **Client certificates**, which are part of a complete configuration of SSL/TLS. For more information, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/security-controls/terminating-ssl-http.md" >}}) and the reference documentation for the HTTP [SSL/TLS](https://nginx.org/en/docs/http/ngx_http_ssl_module.html) module. + + - **Firewall**. Configure your firewall to disallow outside access to the port for the dashboard (8080 in the sample configuration file). + +When you reload the NGINX Plus configuration file, for example by running the `nginx -s reload` command, the NGINX Plus dashboard is available immediately at **http://_nginx-plus-server-address_:8080**. + +For more information about live activity monitoring, see the [NGINX Plus Admin Guide]({{< relref "/nginx/admin-guide/monitoring/live-activity-monitoring.md" >}}). + + +### Full Configuration for Enhanced Load Balancing + +```nginx +user nginx; +worker_processes auto; +error_log /var/log/nginx/error.log info; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +# If placing all directives in the main nginx.conf file, copy the following +# 'http' block into it, including the initial and final lines ('http { ... }') +# which open and close the 'http' context. + +# If creating a separate conf.d/exchange-http.conf file, either exclude the +# initial and final lines from the copied region, or copy them but comment +# them out in the exchange-http.conf file. 
+ +http { + log_format main '$remote_addr - $remote_user [$time_local] + "$request" $status $body_bytes_sent "$http_referer" + "$http_user_agent" $upstream_addr'; + access_log /var/log/nginx/access.log main; + keepalive_timeout 3h; + proxy_read_timeout 3h; + tcp_nodelay on; + + # If this file serves as the main nginx.conf file (contains your entire + # site configuration), this 'include' directive reads in the + # configuration file for live activity monitoring. If creating a + # separate conf.d/exchange-http.conf file, put this directive in the main + # nginx.conf file instead. + include conf.d/status.conf; + + upstream exchange { + zone exchange-general 64k; + ntlm; + + server 10.0.0.237:443; # Replace with IP address of a CAS + server 10.0.0.238:443; # Replace with IP address of a CAS + } + + upstream exchange-activesync { + zone exchange-activesync 64k; + ntlm; + + server 10.0.0.237:443; # Replace with IP address of a CAS + server 10.0.0.238:443; # Replace with IP address of a CAS + } + + upstream exchange-ecp { + zone exchange-ecp 64k; + ntlm; + + server 10.0.0.237:443; # Replace with IP address of a CAS + server 10.0.0.238:443; # Replace with IP address of a CAS + } + + upstream exchange-mapi { + zone exchange-mapi 64k; + ntlm; + + server 10.0.0.237:443; # Replace with IP address of a CAS + server 10.0.0.238:443; # Replace with IP address of a CAS + } + + upstream exchange-owa { + zone exchange-owa 64k; + ntlm; + + server 10.0.0.237:443; # Replace with IP address of a CAS + server 10.0.0.238:443; # Replace with IP address of a CAS + } + + upstream exchange-rpc { + zone exchange-rpc 64k; + ntlm; + + server 10.0.0.237:443; # Replace with IP address of a CAS + server 10.0.0.238:443; # Replace with IP address of a CAS + sticky learn create=$remote_addr lookup=$remote_addr + zone=client_sessions:10m timeout=3h; + } + + match exchange-auth { + status 401; + header WWW-Authenticate ~ Basic; + } + + match exchange-health { + status 200; + body ~ "200 OK"; + } + + 
server {
+        listen 80;
+
+        location / {
+            return 301 https://$host$request_uri;
+        }
+    }
+
+    server {
+        listen 443 ssl;
+        http2 on;
+
+        client_max_body_size 2G;
+        ssl_certificate /etc/nginx/ssl/company.com.crt;
+        ssl_certificate_key /etc/nginx/ssl/company.com.key;
+        ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
+        status_zone exchange-combined;
+
+        location = / {
+            return 301 "/owa/";
+        }
+
+        location = /favicon.ico {
+            empty_gif;
+            access_log off;
+        }
+
+        location / {
+            proxy_pass https://exchange;
+            proxy_http_version 1.1;
+            proxy_set_header Connection "";
+        }
+
+        location /ecp {
+            # Grant access to admin users only, by uncommenting the 'allow'
+            # and 'deny' directives and substituting the IP address and
+            # prefix of your admin network. Or configure more sophisticated
+            # access control.
+
+            #allow 172.16.0.0/16; # Replace with your admin network
+            #deny all;
+
+            proxy_pass https://exchange-ecp;
+            proxy_http_version 1.1;
+            proxy_set_header Connection "";
+            health_check uri=/ecp/healthcheck.htm interval=3s
+                         match=exchange-health;
+        }
+
+        location /mapi {
+            proxy_pass https://exchange-mapi;
+            proxy_http_version 1.1;
+            proxy_set_header Connection "";
+            health_check uri=/mapi/healthcheck.htm interval=3s
+                         match=exchange-health;
+        }
+
+        location /Microsoft-Server-ActiveSync {
+            proxy_pass https://exchange-activesync;
+            proxy_http_version 1.1;
+            proxy_set_header Connection "";
+        }
+
+        location /owa {
+            proxy_pass https://exchange-owa;
+            proxy_http_version 1.1;
+            proxy_set_header Connection "";
+            health_check uri=/owa/healthcheck.htm interval=3s
+                         match=exchange-health;
+        }
+
+        location /rpc/rpcproxy.dll {
+            proxy_pass https://exchange-rpc;
+            proxy_buffering off;
+            proxy_http_version 1.1;
+            proxy_request_buffering off;
+            proxy_set_header Connection "Keep-Alive";
+            health_check uri=/rpc/rpcproxy.dll interval=3s
+                         match=exchange-auth;
+        }
+    }
+}
+
+# If placing all directives in the main nginx.conf file, copy the following
+# 'stream' block into it, including the initial and
final lines +# ('stream { ... }") which open and close the 'stream' context. + +# If using a separate conf.d/exchange-stream.conf file, either exclude +# the initial and final lines from the copied region, or copy them but +# comment them out in the exchange-stream.conf file. + +stream { + upstream exchange-imaps { + zone exchange-imaps 64k; + + server 10.0.0.237:993; # Replace with IP address of a CAS + server 10.0.0.238:993; # Replace with IP address of a CAS + } + + upstream exchange-smtp { + zone exchange-smtp 64k; + + server 10.0.0.237:25; # Replace with IP address of a CAS + server 10.0.0.238:25; # Replace with IP address of a CAS + } + + server { + listen 993; + status_zone exchange-imaps; + proxy_pass exchange-imaps; + } + + server { + listen 25; # SMTP port can be changed here (to 587, for example) + status_zone exchange-smtp; + proxy_pass exchange-smtp; + } +} +``` + + +### Revision History + +- Version 6 (May 2024) – Update about HTTP/2 support (the [http2](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2) directive) +- Version 5 (April 2018) – Update for NGINX Plus API (NGINX Plus R14) +- Version 4 (May 2017) – Update about HTTP/2 support (NGINX Plus Release 11 and later) +- Version 3 (February 2016) – Conversion to HTML formatting (NGINX Plus Release 8) +- Version 2 (November 2015) – Updates for NGINX Plus Release 7 +- Version 1 (May 2015) – Initial version (NGINX Plus Release 6) + + diff --git a/content/nginx/deployment-guides/load-balance-third-party/node-js.md b/content/nginx/deployment-guides/load-balance-third-party/node-js.md new file mode 100644 index 000000000..b6bbcc625 --- /dev/null +++ b/content/nginx/deployment-guides/load-balance-third-party/node-js.md @@ -0,0 +1,886 @@ +--- +description: Load balance Node.js application servers with NGINX Open Source or the + advanced features in F5 NGINX Plus, following our step-by-step setup instructions. 
+docs: DOCS-453 +doctypes: +- task +title: Load Balancing Node.js Application Servers with NGINX Open Source and NGINX + Plus +toc: true +weight: 100 +--- + +This deployment guide explains how to use NGINX Open Source and F5 NGINX Plus to load balance HTTP and HTTPS traffic across a pool of Node.js application servers. The detailed instructions in this guide apply to both cloud‑based and on‑premises deployments of Node.js. + + + + +## About NGINX Open Source and NGINX Plus + +[NGINX Open Source](https://nginx.org/en) is an open source web server and reverse proxy that has grown in popularity in recent years because of its scalability, outstanding performance, and small footprint. NGINX Open Source was first created to solve the C10K problem (serving 10,000 simultaneous connections on a single web server). NGINX Open Source's features and performance have made it a staple of high‑performance sites – it's [the #1 web server at the 100,000 busiest websites in the world](https://w3techs.com/technologies/cross/web_server/ranking). + +[NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) is the commercially supported version of NGINX Open Source. 
NGINX Plus is a complete application delivery platform, extending the power of NGINX Open Source with a host of enterprise‑ready capabilities that enhance a Node.js deployment and are instrumental to building web applications at scale: + +- [Full‑featured HTTP, TCP, and UDP load balancing](https://www.nginx.com/products/nginx/load-balancing/) +- [Intelligent session persistence](https://www.nginx.com/products/nginx/load-balancing/#session-persistence) +- [High‑performance reverse proxy]({{< relref "../../admin-guide/web-server/reverse-proxy.md" >}}) +- [Caching and offload of dynamic and static content]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}) +- [Adaptive streaming to deliver audio and video to any device](https://www.nginx.com/products/nginx/streaming-media/) +- [Application-aware health checks](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/) and [high availability](https://docs.nginx.com/nginx/admin-guide/high-availability/) +- [Advanced activity monitoring available via a dashboard or API](https://www.nginx.com/products/nginx/live-activity-monitoring/) +- [Management and real‑time configuration changes with DevOps‑friendly tools](https://www.nginx.com/products/nginx/load-balancing/#load-balancing-api) + + +## About Node.js + +[Node.js](https://nodejs.org/en/about/) is a JavaScript runtime built on the [V8 JavaScript engine](https://v8.dev/). Node.js uses an event‑driven, non‑blocking I/O model that makes it lightweight and efficient. The package ecosystem for Node.js, [npm](https://www.npmjs.com/), is the largest ecosystem of open source libraries in the world. + +To download the Node.js software and get installation instructions, visit the [Node.js](https://nodejs.org/en/download/) website. + +The information in this deployment guide applies equally to open source Node.js software and commercially supported Node.js frameworks. 
+ + +## Prerequisites and System Requirements + +- A Node.js application server installed and configured on a physical or virtual system. +- A Linux system to host NGINX Open Source or NGINX Plus. To avoid potential conflicts with other applications, we recommend you install NGINX Plus on a fresh physical or virtual system. For the list of Linux distributions supported by NGINX Plus, see [NGINX Plus Technical Specifications]({{< relref "../../technical-specs.md" >}}). +- NGINX Open Source or NGINX Plus installed on the physical or virtual system. Some features are available only with [NGINX Plus](#enhanced), including sophisticated session persistence, application health checks, live activity monitoring, and dynamic reconfiguration of upstream groups. For installation instructions for both products, see the [NGINX Plus Admin Guide]({{< relref "/nginx/admin-guide/installing-nginx/_index.md" >}}). + +The instructions assume you have basic Linux system administration skills, including the following. Full instructions are not provided for these tasks. + +- Configuring and deploying a Node.js application +- Installing Linux software from vendor‑supplied packages +- Editing configuration files +- Copying files between a central administrative system and Linux servers +- Running basic commands to start and stop services +- Reading log files + +### About Sample Values and Copying of Text + +- `example.com` is used as a sample domain name (in key names and configuration blocks). Replace it with your organization's name. +- Many NGINX Open Source and NGINX Plus configuration blocks in this guide list two sample Node.js application servers with IP addresses 192.168.33.11 and 192.168.33.12. Replace these addresses with the IP addresses of your Node.js servers. Include a line in the configuration block for each server if you have more or fewer than two. +- For readability reasons, some commands appear on multiple lines. 
If you want to copy and paste them into a terminal window, we recommend that you first copy them into a text editor, where you can substitute the object names that are appropriate for your deployment and remove any extraneous formatting characters that your browser might insert.
+- We recommend that you do not copy text from the configuration snippets in this guide into your configuration files. For the recommended way to create configuration files, see [Creating and Modifying Configuration Files](#config-files).
+
+
+## Configuring an SSL/TLS Certificate for Client Traffic
+
+If you plan to enable SSL/TLS encryption of traffic between NGINX Open Source or NGINX Plus and clients of your Node.js application, you need to configure a server certificate for NGINX Open Source or NGINX Plus.
+
+- SSL/TLS support is enabled by default in all [NGINX Plus packages](https://cs.nginx.com/) and [NGINX Open Source binaries](https://nginx.org/en/linux_packages.html) provided by NGINX.
+- If you are compiling NGINX Open Source from source, include the `--with-http_ssl_module` parameter to enable SSL/TLS support for HTTP traffic (the corresponding parameter for TCP/UDP is `--with-stream_ssl_module`, and for email is `--with-mail_ssl_module`, but this guide does not cover those protocol types).
+- If using binaries from other providers, consult the provider documentation to determine if they support SSL/TLS.
+
+There are several ways to obtain a server certificate, including the following. For your convenience, step-by-step instructions are provided for the second and third options.
+
+- If you already have an SSL certificate for NGINX Open Source or NGINX Plus installed on another UNIX or Linux system (including systems running Apache HTTP Server), copy it to the **/etc/nginx/ssl** directory on the NGINX Open Source or NGINX Plus server.
+- Generate a self‑signed certificate as described in [Generating a Self‑Signed Certificate](#certificate-self-signed) below.
This is sufficient for testing scenarios, but clients of production deployments generally require a certificate signed by a certificate authority (CA). +- Request a new certificate from a CA or your organization's security group, as described in [Generating a Certificate Request](#certificate-request) below. + +For more details on SSL/TLS termination, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/security-controls/terminating-ssl-http.md" >}}). + +### Generating a Self-Signed Certificate + +Generate a public‑private key pair and a self‑signed server certificate in PEM format that is based on them. + +1. Log in as the root user on a machine that has the `openssl` software installed. + +2. Generate the key pair in PEM format (the default). To encrypt the private key, include the `-des3` parameter. (Other encryption algorithms are available, listed on the man page for the [genrsa](https://www.openssl.org/docs/manmaster/man1/openssl-genrsa.html) command.) You are prompted for the passphrase used as the basis for encryption. + + ```shell + root# openssl genrsa -des3 -out ~/private-key.pem 2048 + Generating RSA private key ... + Enter pass phrase for private-key.pem: + ``` + +3. Create a backup of the key file in a secure location. If you lose the key, the certificate becomes unusable. + + ```shell + root# cp ~/private-key.pem secure-dir/private-key.pem.backup + ``` + +4. Generate the certificate. Include the `-new` and `-x509` parameters to make a new self‑signed certificate. Optionally include the `-days` parameter to change the key's validity lifetime from the default of 30 days (10950 days is about 30 years). Respond to the prompts with values appropriate for your testing deployment. + + ```shell + root# openssl req -new -x509 -key ~/private-key.pem -out ~/self-cert.pem \ + -days 10950 + ``` + +5. Copy or move the certificate file and associated key files to the **/etc/nginx/ssl** directory on the NGINX Open Source or NGINX Plus server. 
+ +### Generating a Certificate Request + +1. Log in as the root user on a machine that has the `openssl` software installed. + +2. Create a private key to be packaged in the certificate. + + ```shell + root# openssl genrsa -out ~/example.com.key 2048 + ``` + +3. Create a backup of the key file in a secure location. If you lose the key, the certificate becomes unusable. + + ```shell + root# cp ~/example.com.key /example.com.key.backup + ``` + +4. Create a Certificate Signing Request (CSR) file. + + ```shell + root# openssl req -new -sha256 -key ~/example.com.key -out ~/example.com.csr + ``` + +5. Request a certificate from a CA or your internal security group, providing the CSR file (**example.com.csr**). As a reminder, never share private keys (**.key** files) directly with third parties. + + The certificate needs to be PEM format rather than in the Windows‑compatible PFX format. If you request the certificate from a CA website yourself, choose NGINX or Apache (if available) when asked to select the server platform for which to generate the certificate. + +6. Copy or move the certificate file and associated key files to the **/etc/nginx/ssl** directory on the NGINX Plus server. + + +## Creating and Modifying Configuration Files + +To reduce errors, this guide has you copy directives from files provided by NGINX into your configuration files, instead of using a text editor to type in the directives yourself. Then you go through the sections in this guide (starting with [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers)) to learn how to modify the directives as required for your deployment. + +As provided, there is one file for basic load balancing (with NGINX Open Source or NGINX Plus) and one file for enhanced load balancing (with NGINX Plus). 
If you are installing and configuring NGINX Open Source or NGINX Plus on a fresh Linux system and using it only to load balance Node.js traffic, you can use the provided file as your main configuration file, which by convention is called **/etc/nginx/nginx.conf**.
+
+We recommend, however, that instead of a single configuration file you use the scheme that is set up automatically when you install an NGINX Plus package, especially if you already have an existing NGINX Open Source or NGINX Plus deployment or plan to expand your use of NGINX Open Source or NGINX Plus to other purposes in future. In the conventional scheme, the main configuration file is still called **/etc/nginx/nginx.conf**, but instead of including all directives in it, you create separate configuration files for different functions and store the files in the **/etc/nginx/conf.d** directory. You then use the `include` directive in the appropriate contexts of the main file to read in the contents of the function‑specific files.
+
+If you have just installed NGINX Open Source or NGINX Plus there is a default configuration file, **default.conf**, in the **/etc/nginx/conf.d** directory. The configuration defined there is not appropriate for the deployment described in this guide, but you want to leave a file with that name in the directory so it does not get replaced with a new version the next time you upgrade NGINX Open Source or NGINX Plus. To save a copy for future reference you can copy it to a new name without the **.conf** extension.
+
+To download the complete configuration file for basic load balancing:
+
+```none
+root# cd /etc/nginx/conf.d
+root# curl https://www.nginx.com/resource/conf/nodejs-basic.conf > nodejs-basic.conf
+```
+
+To download the complete configuration file for enhanced load balancing:
+
+```none
+root# cd /etc/nginx/conf.d
+root# curl https://www.nginx.com/resource/conf/nodejs-enhanced.conf > nodejs-enhanced.conf
+```
+
+(You can also access the URL in a browser and copy the text into the indicated file.)
+
+**Note:** If you download both files, place only one of them in the **/etc/nginx/conf.d** directory.
+
+To set up the conventional configuration scheme, add an `http` configuration block in the main **nginx.conf** file, if it does not already exist. (The standard placement is below any global directives.) Add this [include](https://nginx.org/en/docs/ngx_core_module.html#include) directive with the appropriate filename:
+
+```nginx
+http {
+    include conf.d/nodejs-(basic|enhanced).conf;
+}
+```
+
+Directive documentation: [include](https://nginx.org/en/docs/ngx_core_module.html#include)
+
+You can also use wildcard notation to reference all files that pertain to a certain function or traffic type in the appropriate context block. For example, if you name all HTTP configuration files _function_-http.conf, this is an appropriate `include` directive:
+
+```nginx
+http {
+    include conf.d/*-http.conf;
+}
+```
+
+For reference purposes, the full configuration files are also provided in this document:
+
+- [Full Configuration for Basic Load Balancing](#full-configuration-basic)
+- [Full Configuration for Enhanced Load Balancing](#full-configuration-enhanced)
+
+We recommend, however, that you do not copy text directly from this document. It does not necessarily use the same mechanisms for positioning text (such as line breaks and white space) as text editors do.
In text copied into an editor, lines might run together and indenting of child statements in configuration blocks might be missing or inconsistent. The absence of formatting does not present a problem for NGINX Open Source or NGINX Plus, because (like many compilers) they ignore white space during parsing, relying solely on semicolons and curly braces as delimiters. The absence of white space does, however, make it more difficult for humans to interpret the configuration and modify it without making mistakes. + +### About Reloading Updated Configuration + +We recommend that each time you complete a set of updates to the configuration, you run the `nginx -t` command to test the configuration file for syntactic validity. + +```none +root# nginx -t +nginx: the configuration file /etc/nginx/nginx.conf syntax is ok +nginx: configuration file /etc/nginx/nginx.conf test is successful +``` + +To tell NGINX Open Source or NGINX Plus to start using the new configuration, run one of the following commands: + +```none +root# nginx -s reload +``` + +or + +```none +root# service nginx reload +``` + + +## Configuring Basic Load Balancing with NGINX Open Source or NGINX Plus + +This section explains how to set up NGINX Open Source or NGINX Plus as a load balancer in front of two Node.js servers. The instructions in the first two sections are mandatory: + +- [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers) +- [Configuring Basic Load Balancing](#load-balancing-basic) + +The instructions in the remaining sections are optional, depending on the requirements of your application: + +- [Configuring Basic Session Persistence](#session-persistence-basic) +- [Configuring Proxy of WebSocket Traffic](#websocket) +- [Configuring Content Caching](#caching) +- [Configuring HTTP/2 Support](#http2) + +The complete configuration file appears in [Full Configuration for Basic Load Balancing](#full-configuration-basic). 
+ +If you are using NGINX Plus, you can configure additional enhanced features after you complete the configuration of basic load balancing. See [Configuring Enhanced Load Balancing with NGINX Plus](#enhanced). + + +### Configuring Virtual Servers for HTTP and HTTPS Traffic + +These directives define virtual servers for HTTP and HTTPS traffic in separate `server` blocks in the top‑level `http` configuration block. All HTTP requests are redirected to the HTTPS server. + +1. Configure a `server` block that listens for requests for **"https://example.com"** received on port 443. + + The `ssl_certificate` and `ssl_certificate_key` directives are required; substitute the names of the certificate and private key you chose in [Configuring an SSL/TLS Certificate for Client Traffic](#tls-certificate). + + The other directives are optional but recommended. + + ```nginx + # In the 'http' block + server { + listen 443 ssl; + server_name example.com; + + ssl_certificate /etc/nginx/ssl/; + ssl_certificate_key /etc/nginx/ssl/; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + } + ``` + + Directive documentation: [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen), [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server), [server_name](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_name), [ssl_certificate](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate), [ssl_certificate_key](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate_key), [ssl_prefer_server_ciphers](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_prefer_server_ciphers), [ssl_session_cache](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache) + +2. Configure a `server` block that permanently redirects requests received on port 80 for **"http://example.com"** to the HTTPS server, which is defined in the next step. 
+ + If you're not using SSL/TLS for client connections, omit the `location` block. When instructed in the remainder of this guide to add directives to the `server` block for HTTPS traffic, add them to this block instead. + + ```nginx + # In the 'http' block + server { + listen 80; + server_name example.com; + + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header Connection ""; + + # Redirect all HTTP requests to HTTPS + location / { + return 301 https://$server_name$request_uri; + } + } + ``` + + Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_http_version](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version), [proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header), [return](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#return) + +For more information on configuring SSL/TLS, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/security-controls/terminating-ssl-http.md" >}}) and the reference documentation for the HTTP [SSL/TLS](https://nginx.org/en/docs/http/ngx_http_ssl_module.html) module. + + +### Configuring Basic Load Balancing + +To configure load balancing, you first create a named "upstream group," which lists the backend servers. You then set up NGINX Open Source or NGINX Plus as a reverse proxy and load balancer by referring to the upstream group in one or more `proxy_pass` directives. + +1. Configure an upstream group called **nodejs** with two Node.js application servers listening on port 8080, one on IP address 192.168.33.11 and the other on 192.168.33.12. + + ```nginx + # In the 'http' block + upstream nodejs { + server 192.168.33.11:8080; + server 192.168.33.12:8080; + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + +2. 
In the `server` block for HTTPS traffic that we created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers), include two `location` blocks: + + - The first one matches HTTPS requests in which the path starts with **/webapp/**, and proxies them to the **nodejs** upstream group we created in the previous step. + - The second one funnels all traffic to the first `location` block, by doing a temporary redirect of all requests for **"http://example.com/"**. + + ```nginx + # In the 'server' block for HTTPS traffic + location /webapp/ { + proxy_pass http://nodejs; + } + + location = / { + return 302 /webapp/; + } + ``` + + Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [return](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#return) + + Note that these blocks handle only standard HTTPS traffic. If you want to load balance WebSocket traffic, you need to add another `location` block as described in [Configuring Proxy of WebSocket Traffic](#websocket). + +By default, NGINX Open Source and NGINX Plus use the Round Robin algorithm for load balancing among servers. The load balancer runs through the list of servers in the upstream group in order, forwarding each new request to the next server. In our example, the first request goes to 192.168.33.11, the second to 192.168.33.12, the third to 192.168.33.11, and so on. For information about the other available load‑balancing algorithms, see the NGINX Plus Admin Guide. + +In NGINX Plus, you can also set up dynamic reconfiguration of an upstream group when the set of backend servers changes, using the Domain Name System (DNS) or an API; see [Enabling Dynamic Reconfiguration of Upstream Groups](#reconfiguration). 
+ +For more information on proxying and load balancing, see [NGINX Reverse Proxy]({{< relref "../../admin-guide/web-server/reverse-proxy.md" >}}) and [HTTP Load Balancing]({{< relref "../../admin-guide/load-balancer/http-load-balancer.md" >}}) in the NGINX Plus Admin Guide, and the reference documentation for the HTTP [Proxy](https://nginx.org/en/docs/http/ngx_http_proxy_module.html) and [Upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html) modules. + + +### Configuring Basic Session Persistence + +If your application requires basic session persistence (also known as _sticky sessions_), you can implement it in NGINX Open Source with the IP Hash load‑balancing algorithm. (NGINX Plus offers a more sophisticated form of session persistence, as described in [Configuring Advanced Session Persistence](#session-persistence-advanced).) + +With the IP Hash algorithm, for each request NGINX calculates a hash based on the client's IP address, and associates the hash with one of the upstream servers. It sends all requests with that hash to that server, thus establishing session persistence. + +If the client has an IPv6 address, the hash is based on the entire address. If it has an IPv4 address, the hash is based on just the first three octets of the address. This is designed to optimize for ISP clients that are assigned IP addresses dynamically from a subnetwork (/24) range. However, it is not effective in these cases: + +- The majority of the traffic to your site is coming from one forward proxy or from clients on the same /24 network, because in that case IP Hash maps all clients to the same server. +- A client's IP address can change during the session, for example when a mobile client switches from a WiFi network to a cellular one. 
+ +To configure session persistence in NGINX, add the `ip_hash` directive to the `upstream` block created in [Configuring Basic Load Balancing](#load-balancing-basic): + +```nginx +# In the 'http' block +upstream nodejs { + ip_hash; + server 192.168.33.11:8080; + server 192.168.33.12:8080; +} +``` + +Directive documentation: [ip_hash](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ip_hash) + +You can also use the Hash load‑balancing method for session persistence, with the hash based on any combination of text and [NGINX variables](https://nginx.org/en/docs/varindex.html) you specify. For example, you can hash on full (four‑octet) client IP addresses with the following configuration. + +```nginx +# In the 'http' block +upstream nodejs { + hash $remote_addr; + server 192.168.33.11:8080; + server 192.168.33.12:8080; +} +``` + +Directive documentation: [hash](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#hash) + + +### Configuring Proxy of WebSocket Traffic + +The WebSocket protocol (defined in [RFC 6455](https://tools.ietf.org/html/rfc6455)) enables simultaneous two‑way communication over a single TCP connection between clients and servers, where each side can send data independently from the other. To initiate the WebSocket connection, the client sends a handshake request to the server, upgrading the request from standard HTTP to WebSocket. The connection is established if the handshake request passes validation, and the server accepts the request. When a WebSocket connection is created, a browser client can send data to a server while simultaneously receiving data from that server. + +The Node.js app server supports WebSocket out of the box, so no additional Node.js configuration is required. If you want to use NGINX Open Source or NGINX Plus to proxy WebSocket traffic to your Node.js application servers, add the directives discussed in this section. + +NGINX Open Source and NGINX Plus by default use HTTP/1.0 for upstream connections. 
To be proxied correctly, WebSocket connections require HTTP/1.1 along with some other configuration directives that set HTTP headers: + +```nginx +# In the 'http' block +map $http_upgrade $connection_upgrade { + default upgrade; + '' close; +} + +# In the 'server' block for HTTPS traffic +location /wstunnel/ { + proxy_pass http://nodejs; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; +} +``` + +Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [map](https://nginx.org/en/docs/http/ngx_http_map_module.html#map), [proxy_http_version](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) + +The first `proxy_set_header` directive is needed because the `Upgrade` request header is hop-by-hop; that is, the HTTP specification explicitly forbids proxies from forwarding it. This directive overrides the prohibition. + +The second `proxy_set_header` directive sets the `Connection` header to a value that depends on the test in the `map` block: if the request has an `Upgrade` header, the `Connection` header is set to `upgrade`; otherwise, it is set to `close`. + +For more information about proxying WebSocket traffic, see [WebSocket proxying](https://nginx.org/en/docs/http/websocket.html) and [NGINX as a WebSocket Proxy](https://www.nginx.com/blog/websocket-nginx/). + + +### Configuring Content Caching + +Caching responses from your Node.js app servers can both improve response time to clients and reduce load on the servers, because eligible responses are served immediately from the cache instead of being generated again on the server. 
There are a variety of useful directives that can be used to fine‑tune caching behavior; for a detailed discussion, see [A Guide to Caching with NGINX](https://www.nginx.com/blog/nginx-caching-guide/). + +To enable basic caching of responses from the Node.js app server, add the following configuration: + +1. Include the `proxy_cache_path` directive to create the local disk directory **/tmp/NGINX_cache/** for use as a cache. The `keys_zone` parameter allocates 10 megabytes (MB) of shared memory for a zone called **backcache**, which is used to store cache keys and metadata such as usage timers. A 1‑MB zone can store data for about 8,000 keys. + + ```nginx + # In the 'http' block + proxy_cache_path /tmp/NGINX_cache/ keys_zone=backcache:10m; + ``` + + Directive documentation: [proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) + +2. In the `location` block that matches HTTPS requests in which the path starts with **/webapp/**, include the `proxy_cache` directive to reference the cache created in the previous step. + + ```nginx + # In the 'server' block for HTTPS traffic + location /webapp/ { + proxy_pass http://nodejs; + proxy_cache backcache; + } + ``` + + Directive documentation: [proxy_cache](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) + +For more complete information on caching, refer to the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}) and the reference documentation for the HTTP [Proxy](https://nginx.org/en/docs/http/ngx_http_proxy_module.html) module. + + +### Configuring HTTP/2 Support + +HTTP/2 is fully supported in both NGINX 1.9.5 and later, and NGINX Plus R7 and later. As always, we recommend you run the latest version of software to take advantage of improvements and bug fixes. 
+ +- If using NGINX Open Source, note that in version 1.9.5 and later the SPDY module is completely removed from the codebase and replaced with the [HTTP/2](https://nginx.org/en/docs/http/ngx_http_v2_module.html) module. After upgrading to version 1.9.5 or later, you can no longer configure NGINX Open Source to use SPDY. If you want to keep using SPDY, you need to compile NGINX Open Source from the sources in the [NGINX 1.8.x branch](https://nginx.org/en/download.html). + +- If using NGINX Plus, in R11 and later the nginx-plus package supports HTTP/2 by default, and the nginx-plus-extras package available in previous releases is deprecated by separate [dynamic modules](https://www.nginx.com/products/nginx/modules/) authored by NGINX. + + In NGINX Plus R8 through R10, the nginx-plus and nginx-plus-extras packages support HTTP/2 by default. + + In NGINX Plus R8 and later, NGINX Plus supports HTTP/2 by default, and does not support SPDY. + + If using NGINX Plus R7, you must install the nginx-plus-http2 package instead of the nginx-plus or nginx-plus-extras package. + +To enable HTTP/2 support, add the [http2](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2) directive in the `server` block for HTTPS traffic that we created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers), so that it looks like this: + +```nginx +# In the 'server' block for HTTPS traffic +listen 443 ssl; +http2 on; +``` + +Directive documentation: [http2](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2) + +To verify that HTTP/2 translation is working, you can use the "HTTP/2 and SPDY indicator" plug‑in available for [Google Chrome](https://chrome.google.com/webstore/detail/http2-and-spdy-indicator/mpbpobfflnpcgagjijhmgnchggcjblin?hl=en) and [Firefox](https://addons.mozilla.org/en-US/firefox/addon/http2-indicator/). 
+ + + +### Full Configuration for Basic Load Balancing + +The full configuration for basic load balancing appears here for your convenience. It goes in the `http` context. The complete file is available for [download](https://www.nginx.com/resource/conf/nodejs-basic.conf) from the NGINX website. + +We recommend that you do not copy text directly from this document, but instead use the method described in [Creating and Modifying Configuration Files](#config-files) to include these directives in your configuration – add an `include` directive to the `http` context of the main **nginx.conf** file to read in the contents of /etc/nginx/conf.d/nodejs-basic.conf. + +```nginx +proxy_cache_path /tmp/NGINX_cache/ keys_zone=backcache:10m; + +map $http_upgrade $connection_upgrade { + default upgrade; + ' ' close; +} + +upstream nodejs { + # Use IP Hash for session persistence + ip_hash; + + # List of Node.js application servers + server 192.168.33.11:8080; + server 192.168.33.12:8080; +} + +server { + listen 80; + server_name example.com; + + # Redirect all HTTP requests to HTTPS + location / { + return 301 https://$server_name$request_uri; + } +} + +server { + listen 443 ssl; + http2 on; + + server_name example.com; + + ssl_certificate /etc/nginx/ssl/certificate-name; + ssl_certificate_key /etc/nginx/ssl/private-key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + + # Return a temporary redirect to '/webapp/' when user requests '/' + location = / { + return 302 /webapp/; + } + + # Load balance requests for '/webapp/' across Node.js app servers + location /webapp/ { + proxy_pass http://nodejs; + proxy_cache backcache; + } + + # WebSocket configuration + location /wstunnel/ { + proxy_pass https://nodejs; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + } +} + +``` + + +## Configuring Enhanced Load Balancing with NGINX Plus + +This section explains how to configure enhanced load 
balancing with some of the extended features in NGINX Plus. + +**Note:** Before setting up the enhanced features described in this section, you must complete the instructions for basic load balancing in these two sections: + +- [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers) +- [Configuring Basic Load Balancing](#load-balancing-basic) + +Except as noted, all optional basic features (described in the other subsections of [Configuring Basic Load Balancing in NGINX Open Source and NGINX Plus](#basic)) can be combined with the enhanced features described here. + +- [Configuring Advanced Session Persistence](#session-persistence-advanced) +- [Configuring Application Health Checks](#health-checks) +- [Enabling Live Activity Monitoring](#live-activity-monitoring) +- [Enabling Dynamic Reconfiguration of Upstream Groups](#reconfiguration) + +The complete configuration file appears in [Full Configuration for Enhanced Load Balancing](#full-configuration-enhanced). + + +### Configuring Advanced Session Persistence + +NGINX Plus has more sophisticated session persistence methods than open source NGINX, implemented in three variants of the `sticky` directive. In the following example, we add the `sticky cookie` directive to the upstream group we created in [Configuring Basic Load Balancing](#load-balancing-basic). + +1. Remove or comment out the `ip_hash` directive, leaving only the `server` directives: + + ```nginx + # In the 'http' block + upstream nodejs { + #ip_hash; + server 192.168.33.11:8080; + server 192.168.33.12:8080; + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + +2. Configure session persistence that uses the `sticky cookie` directive. 
+ + ```nginx + # In the 'http' block + upstream nodejs { + zone nodejs 64k; + server 192.168.33.11:8080; + server 192.168.33.12:8080; + sticky cookie srv_id expires=1h domain=.example.com path=/; + } + ``` + + Directive documentation: [`sticky cookie`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +With this method, NGINX Plus adds an HTTP session cookie to the first response to a given client from the upstream group, identifying which server generated the response (in an encoded fashion). Subsequent requests from the client include the cookie value and NGINX Plus uses it to route the request to the same upstream server, thereby achieving session persistence. + +The `zone` directive creates a shared memory zone for storing information about sessions. The amount of memory allocated – here, 64 KB – determines how many sessions can be stored at a time (the number varies by platform). The name assigned to the zone – here, `nodejs` – must be unique for each `sticky` directive. + +The first parameter to `sticky cookie` (in the example, `srv_id`) sets the name of the cookie to be set or inspected. The `expires` parameter tells the browser how long the cookie is valid, here one hour. The `domain` parameter defines the domain and the `path` parameter defines the URL path for which the cookie is set. + +For more information about session persistence, see the NGINX Plus Admin Guide. + + +### Configuring Application Health Checks + +Health checks are out‑of‑band HTTP requests sent to a server at fixed intervals. They are used to determine whether a server is responsive and functioning correctly, without requiring an actual request from a client. + +Because the `health_check` directive is placed in the `location` block, we can enable different health checks for each application. + +1. 
In the `location` block that matches HTTPS requests in which the path starts with **/webapp/** (created in [Configuring Basic Load Balancing](#load-balancing-basic)), add the `health_check` directive. + + Here we configure NGINX Plus to send an out‑of‑band request for the top‑level URI **/** (slash) to each of the servers in the **nodejs** upstream group every 5 seconds (the default URI and frequency). If a server does not respond correctly, it is marked down and NGINX Plus stops sending requests to it until it passes a subsequent health check. We include the `match` parameter so we can define a nondefault set of health‑check tests (we define them in the next step). + + ```nginx + # In the 'server' block for HTTPS traffic + location /webapp/ { + proxy_pass http://nodejs; + proxy_cache backcache; + health_check match=health_check; + } + ``` + + Directive documentation: [health_check](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check) + +2. In the `http` context, include a `match` directive to define the tests that a server must pass to be considered functional. In this example, it must return status code `200`, the `Content-Type` response header must contain `text/html`, and the response body must match the indicated character string. + + ```nginx + # In the 'http' block + match health_check { + status 200; + header Content-Type ~ text/html; + body ~ "Hello world"; + } + ``` + + Directive documentation: [match](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#match) + +3. In the **nodejs** upstream group, add the following `zone` directive as necessary (if you configured [advanced session persistence](#session-persistence-advanced) you already added it). It creates a shared memory zone that stores the group's configuration and run‑time state, which are accessible to all worker processes. 
+ + ```nginx + # In the 'http' block + upstream nodejs { + zone nodejs 64k; + server 192.168.33.11:8080; + server 192.168.33.12:8080; + # ... + } + ``` + + Directive documentation: [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +NGINX Plus also has a slow‑start feature that is a useful auxiliary to health checks. When a failed server recovers, or a new server is added to the upstream group, NGINX Plus slowly ramps up the traffic to it over a defined period of time. This gives the server time to "warm up" without being overwhelmed by more connections than it can handle as it starts up. For more information, see the NGINX Plus Admin Guide. + +For example, to set a slow‑start period of 30 seconds for your Node.js application servers, include the `slow_start` parameter to their `server` directives: + +```nginx +# In the 'upstream' block +server 192.168.33.11:8080 slow_start=30s; +server 192.168.33.12:8080 slow_start=30s; +``` + +Parameter documentation: [slow_start](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#slow_start) + +For information about customizing health checks, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/load-balancer/http-health-check.md" >}}). + + +### Enabling Live Activity Monitoring + +NGINX Plus includes a live activity monitoring interface that provides key load and performance metrics in real time, including TCP metrics in NGINX Plus R6 and later. Statistics are reported through a RESTful JSON interface, making it very easy to feed the data to a custom or third‑party monitoring tool. There is also a built‑in dashboard. Follow these instructions to deploy it. + +Dashboard tab in NGINX Plus live activity monitoring dashboard + +For more information about live activity monitoring, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/monitoring/live-activity-monitoring.md" >}}). 
+ +The quickest way to configure the module and the built‑in NGINX Plus dashboard is to download the sample configuration file from the NGINX website and modify it as necessary. For more complete instructions, see [Live Activity Monitoring of NGINX Plus in 3 Simple Steps](https://www.nginx.com/blog/live-activity-monitoring-nginx-plus-3-simple-steps/). + +1. Download the **status.conf** file to the NGINX Plus server: + + ```nginx + # cd /etc/nginx/conf.d + # curl https://www.nginx.com/resource/conf/status.conf > status.conf + ``` + +2. Customize the file for your deployment as specified by comments in the file. In particular, the default settings in the file allow anyone on any network to access the dashboard. We strongly recommend that you restrict access to the dashboard with one or more of the following methods: + + - **IP address‑based access control lists (ACLs)**. In the sample configuration file, uncomment the `allow` and `deny` directives, and substitute the address of your administrative network for 10.0.0.0/8. Only users on the specified network can access the status page. + + ```nginx + allow 10.0.0.0/8; + deny all; + ``` + + Directive documentation: [allow and deny](https://nginx.org/en/docs/http/ngx_http_access_module.html) + + - **HTTP Basic authentication**. In the sample configuration file, uncomment the `auth_basic` and `auth_basic_user_file` directives and add user entries to the **/etc/nginx/users** file (for example, by using an [htpasswd generator](https://httpd.apache.org/docs/2.4/programs/htpasswd.html)). If you have an Apache installation, another option is to reuse an existing **htpasswd** file. 
+ + ```nginx + auth_basic on; + auth_basic_user_file /etc/nginx/users; + ``` + + Directive documentation: [auth_basic](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic), [auth_basic_user_file](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic_user_file) + + - **Client certificates**, which are part of a complete configuration of SSL/TLS. For more information, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/security-controls/terminating-ssl-http.md" >}}) and the documentation for the HTTP [SSL/TLS](https://nginx.org/en/docs/http/ngx_http_ssl_module.html) module. + + - **Firewall**. Configure your firewall to disallow outside access to the port for the dashboard (8080 in the sample configuration file). + +3. In the **nodejs** upstream group, include the `zone` directive as necessary (if you configured [advanced session persistence](#session-persistence-advanced) or [application health checks](#health-checks), you already added it). It creates a shared memory zone that stores the group's configuration and run‑time state, which are accessible to all worker processes. + + ```nginx + # In the 'http' block + upstream nodejs { + zone nodejs 64k; + server 192.168.33.11:8080; + server 192.168.33.12:8080; + # ... + } + ``` + + Directive documentation: [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +4. In the `server` block for HTTPS traffic (created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers)), add the `status_zone` directive: + + ```nginx + # In the 'server' block for HTTPS traffic + status_zone nodejs_server; + ``` + + Directive documentation: [status_zone](https://nginx.org/en/docs/http/ngx_http_status_module.html#status_zone) + +When you reload the NGINX Plus configuration file, for example by running the `nginx -s reload` command, the NGINX Plus dashboard is available immediately at http://_nginx-plus-server-address_:8080. 
+ + +### Enabling Dynamic Reconfiguration of Upstream Groups + +With NGINX Plus, you can reconfigure load‑balanced server groups (both HTTP and TCP/UDP) dynamically using either DNS or the NGINX Plus API introduced in NGINX Plus R13. See the NGINX Plus Admin Guide for a more detailed discussion of the DNS and [API]({{< relref "../../admin-guide/load-balancer/dynamic-configuration-api.md" >}}) methods. + +#### Configuring the API Method + +To enable dynamic reconfiguration of your upstream group of Node.js app servers using the NGINX Plus API, you need to grant secured access to it. You can use the API to add or remove servers, dynamically alter their weights, and set their status as `primary`, `backup`, or `down`. + +1. Include the `zone` directive in the **nodejs** upstream group to create a shared memory zone for storing the group's configuration and run‑time state, which makes the information available to all worker processes. (If you configured [advanced session persistence](#session-persistence-advanced), [application health checks](#health-checks), or [live activity monitoring](#live-activity-monitoring), you already made this change.) + + ```nginx + # In the 'http' block + upstream nodejs { + zone nodejs 64k; + server 192.168.33.11:8080; + server 192.168.33.12:8080; + # ... + } + ``` + + Directive documentation: [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +2. In the `server` block for HTTPS traffic (created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers)), add a new `location` block for the NGINX Plus API, which enables dynamic reconfiguration among other features. It contains the `api` directive (**api** is also the conventional name for the location, as used here). + + (If you configured [live activity monitoring](#live-activity-monitoring) by downloading the **status.conf** file, it already includes this block.) 
+ + We strongly recommend that you restrict access to the location so that only authorized administrators can access the NGINX Plus API. The [allow and deny](https://nginx.org/en/docs/http/ngx_http_access_module.html) directives in the following example permit access only from the localhost address (127.0.0.1). + + ```nginx + # In the 'server' block for HTTPS traffic + location /api { + api write=on; + allow 127.0.0.1; + deny all; + } + ``` + + Directive documentation: [allow and deny](https://nginx.org/en/docs/http/ngx_http_access_module.html), [api](https://nginx.org/en/docs/http/ngx_http_api_module.html#api) + +#### Configuring the DNS Method + +In the `http` block, add the `resolver` directive pointing to your DNS server and then add the `resolve` parameter to the `server` directive in the **nodejs** `upstream` block, which instructs NGINX Plus to periodically re‑resolve the domain name (**example.com** here) with DNS: + +```nginx +# In the 'http' block +resolver ; + +upstream nodejs { + zone nodejs 64k; + server example.com resolve; +} +``` + +Directive and parameter documentation: [resolve](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#resolve), [resolver](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver) + +[NGINX Plus Release 9](https://www.nginx.com/blog/nginx-plus-r9-released/#dns-srv) and later can also use the additional information in DNS `SRV` records, such as the port number. Include the `service` parameter to the `server` directive, along with the `resolve` parameter: + +```nginx +# In the 'http' block +resolver ; + +upstream nodejs { + zone nodejs 64k; + server example.com service=http resolve; +} +``` + +Parameter documentation: [service](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#service) + + +### Full Configuration for Enhanced Load Balancing + +The full configuration for enhanced load balancing appears here for your convenience. It goes in the `http` context. 
The complete file is available for [download](https://www.nginx.com/resource/conf/nodejs-enhanced.conf) from the NGINX website. + +We recommend that you do not copy text directly from this document, but instead use the method described in [Creating and Modifying Configuration Files](#config-files) to include these directives in your configuration – namely, add an `include` directive to the `http` context of the main **nginx.conf** file to read in the contents of /etc/nginx/conf.d/nodejs-enhanced.conf. + +**Note:** The `api` block in this configuration summary and the [downloadable](https://www.nginx.com/resource/conf/nodejs-enhanced.conf) nodejs-enhanced.conf file is for the [API method](#reconfiguration-api) of dynamic reconfiguration. If you want to use the [DNS method](#reconfiguration-dns) instead, make the appropriate changes to the block. (You can also remove or comment out the directives for the NGINX Plus API in that case, but they do not conflict with using the DNS method and enable features other than dynamic reconfiguration.) 
+ +```nginx +proxy_cache_path /tmp/NGINX_cache/ keys_zone=backcache:10m; + +map $http_upgrade $connection_upgrade { + default upgrade; + '' close; +} + +match nodejs_check { + status 200; + header Content-Type ~ "text/html"; + body ~ "Hello world"; +} + +upstream nodejs { + # Health-monitored upstream groups must have a zone defined + zone nodejs 64k; + + # List of Node.js application servers + server 192.168.33.11:8080 slow_start=30s; + server 192.168.33.12:8080 slow_start=30s; + + # Session persistence using sticky cookie + sticky cookie srv_id expires=1h domain=.example.com path=/; +} + +server { + listen 80; + server_name example.com; + + # Redirect all HTTP requests to HTTPS + location / { + return 301 https://$server_name$request_uri; + } +} + +server { + listen 443 ssl; + http2 on; + + server_name example.com; + + # Required for NGINX Plus to provide extended status information + status_zone nodejs; + + ssl_certificate /etc/nginx/ssl/certificate-name; + ssl_certificate_key /etc/nginx/ssl/private-key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + + # Return a 302 redirect to '/webapp/' when user requests '/' + location = / { + return 302 /webapp/; + } + + # Load balance requests for '/webapp/' across Node.js app servers + location /webapp/ { + proxy_pass http://nodejs; + proxy_cache backcache; + # Set up active health checks + health_check match=nodejs_check; + } + + # WebSocket configuration + location /wstunnel/ { + proxy_pass https://nodejs; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + } + + # Secured access to the NGINX Plus API + location /api { + api write=on; + allow 127.0.0.1; # Permit access from localhost + deny all; # Deny access from everywhere else + } +} +``` + + +## Resources + +- [NGINX Plus Overview](https://www.nginx.com/products/nginx) +- [NGINX Plus Admin Guide]({{< relref "/nginx/admin-guide/_index.md" >}}) +- [NGINX 
Wiki](https://www.nginx.com/resources/wiki/) + +_[NodeSource](https://nodesource.com/), developers of N|Solid, contributed to this deployment guide._ + +### Revision History + +- Version 4 (May 2024) – Update about HTTP/2 support (the [http2](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2) directive) +- Version 3 (April 2018) – Updated information about the NGINX Plus API (NGINX Plus R13, NGINX Open Source 1.13.4) +- Version 2 (May 2017) – Update about HTTP/2 support (NGINX Plus R11 and later) +- Version 1 (December 2016) – Initial version (NGINX Plus R11, NGINX 1.11.5) diff --git a/content/nginx/deployment-guides/load-balance-third-party/oracle-e-business-suite.md b/content/nginx/deployment-guides/load-balance-third-party/oracle-e-business-suite.md new file mode 100644 index 000000000..c5f2d42f4 --- /dev/null +++ b/content/nginx/deployment-guides/load-balance-third-party/oracle-e-business-suite.md @@ -0,0 +1,1183 @@ +--- +description: Load balance Oracle E-Business Suite applications with the advanced features + in F5 NGINX Plus, following our step-by-step setup instructions. +docs: DOCS-454 +doctypes: +- task +title: Load Balancing Oracle E-Business Suite with NGINX Plus +toc: true +weight: 100 +--- + +This deployment guide explains how to use F5 NGINX Plus to load balance traffic across a pool of Oracle E-Business Suite (EBS) 12 servers. It provides complete instructions for configuring NGINX Plus as required. + + +## About NGINX Plus and Oracle EBS + +[NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) is the commercially supported version of [NGINX Open Source](https://nginx.org/en/). 
NGINX Plus is a complete application delivery platform, extending the power of NGINX with a host of enterprise‑ready capabilities that enhance an Oracle EBS deployment and are instrumental to building web applications at scale: + +- [Full‑featured HTTP, TCP, and UDP load balancing](https://www.nginx.com/products/nginx/load-balancing/) +- [Intelligent session persistence](https://www.nginx.com/products/nginx/load-balancing/#session-persistence) +- [High‑performance reverse proxy]({{< relref "../../admin-guide/web-server/reverse-proxy.md" >}}) +- [Caching and offload of dynamic and static content]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}) +- [Adaptive streaming to deliver audio and video to any device](https://www.nginx.com/products/nginx/streaming-media/) +- [Application-aware health checks](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/) and [high availability](https://docs.nginx.com/nginx/admin-guide/high-availability/) +- [Advanced activity monitoring available via a dashboard or API](https://www.nginx.com/products/nginx/live-activity-monitoring/) +- [Management and real‑time configuration changes with DevOps‑friendly tools](https://www.nginx.com/products/nginx/load-balancing/#load-balancing-api) + +[Oracle E‑Business Suite](https://www.oracle.com/applications/ebusiness/) (EBS) is a comprehensive suite of integrated, global business applications that enable organizations to make better decisions, reduce costs, and increase performance. Its cross‑industry capabilities include enterprise resource planning, customer relationship management, and supply chain planning. + + +## Prerequisites and System Requirements + +The following systems and software are required: + +- Oracle EBS 12.2, installed and configured according to Oracle best practices. +- Linux system to host NGINX Plus. To avoid potential conflicts with other applications, we recommend you install NGINX Plus on a fresh physical or virtual system. 
For the list of operating systems supported by NGINX Plus, see [NGINX Plus Technical Specifications]({{< relref "../../technical-specs.md" >}}).
- NGINX Plus R6 or later.

You can install NGINX Plus on premises, in a private cloud, or in a public cloud such as the Amazon Elastic Compute Cloud (EC2), the Google Cloud Platform, or Microsoft Azure. See the instructions for your installation type:

- On premises or private cloud – [Installing NGINX Plus]({{< relref "../../admin-guide/installing-nginx/installing-nginx-plus.md" >}})
- Amazon Elastic Compute Cloud (EC2) – [Installing NGINX Plus AMIs on Amazon EC2]({{< relref "../../admin-guide/installing-nginx/installing-nginx-plus-amazon-web-services.md" >}})
- Google Compute Cloud – [Installing NGINX Plus on the Google Cloud Platform]({{< relref "../../admin-guide/installing-nginx/installing-nginx-plus-google-cloud-platform.md" >}})
- Microsoft Azure – [Installing NGINX Plus on Microsoft Azure]({{< relref "../../admin-guide/installing-nginx/installing-nginx-plus-microsoft-azure.md" >}})

The instructions assume you have basic Linux system administration skills, including the following. Full instructions are not provided for these tasks.

- Configuring and deploying EBS
- Installing Linux software from vendor‑supplied packages
- Editing configuration files
- Copying files between a central administrative system and Linux servers
- Running basic commands to start and stop services
- Reading log files

Similarly, the instructions assume you have the support of the team that manages your Oracle deployment. Their tasks include the following:

- Modifying Oracle configurations to configure a Web Entry Point
- Verifying the configuration


### About Sample Values and Copying of Text

- `company.com` is used as a sample organization name (in key names and configuration blocks). Replace it with your organization's name.
+
- Many NGINX Plus configuration blocks in this guide list two sample EBS application servers with IP addresses 172.31.0.146 and 172.31.11.210. Replace these addresses with the IP addresses of your EBS servers. Include a line in the configuration block for each server if you have more or fewer than two.
- For readability reasons, some commands appear on multiple lines. If you want to copy and paste them into a terminal window, we recommend that you first copy them into a text editor, where you can substitute the object names that are appropriate for your deployment and remove any extraneous formatting characters that your browser might insert.
- We recommend that you do not copy text from the configuration snippets in this guide into your configuration files. For the recommended way to create configuration files, see [Creating and Modifying Configuration Files](#config-files).


## Architectural Design

This figure represents a typical load‑balancing architecture:

![Typical architecture for load balancing three application servers](/nginx/images/oracle-ebs-typical-architecture.png)
<!-- TODO(review): the image reference above was malformed (unclosed markup, no path) in the source; confirm the correct image file -->

A load balancer performs the following tasks:

- Terminates SSL/TLS connections (encrypts and decrypts SSL/TLS traffic)
- Selects backend servers based on a load‑balancing method and health checks
- Forwards HTTP requests to selected backend servers
- Provides session persistence
- Provides logging and monitoring capabilities

Oracle EBS has application tiers and a database tier. A load balancer is used in front of application tiers in order to provide higher performance, availability, security, and traffic management for the application servers.

![NGINX Plus as a load balancer between clients and the application tier in an Oracle E-Business Suite deployment](/nginx/images/oracle-ebs-nginx-plus.png)
<!-- TODO(review): this line appeared as bare alt text with no image markup in the source; confirm the correct image file -->


## Configuring Firewalls

For improved security, the NGINX Plus load balancer might be located in a DMZ. This might complicate and delay the installation process because of a required firewall configuration.
+ +Review the network configuration requirements in the table and make appropriate changes to your firewalls before proceeding with the configuration. + + +{{}} + +|**Purpose** | **Port** | **Source** | **Destination** | +| ---| ---| ---| --- | +|Admin access, file transfer | 22 | Administrative network | NGINX Plus load balancer | +|Installation and update of NGINX Plus software | 443 | NGINX Plus load balancer | Repository: pkgs.nginx.com | +|HTTP to HTTPS redirects | 80 | Any | NGINX Plus | +|Production HTTPS traffic | 443 | Any | NGINX Plus | +|Access to backend application | 8000* | NGINX Plus | Backend application servers | +|Access to load‑balanced application from application servers | 443 | Backend application servers | NGINX Plus load balancer | + +{{}} + + +**\*** Replace port 8000 with the actual application port as appropriate. + + +## Configuring an SSL/TLS Certificate for Client Traffic + +If you plan to enable SSL/TLS encryption of traffic between NGINX Plus and clients of your EBS application, you need to configure a server certificate for NGINX Plus. + +- TLS/SSL support is enabled by default in all [NGINX Plus packages](https://account.f5.com/myf5) and [NGINX binaries](https://nginx.org/en/linux_packages.html) provided by NGINX, Inc. +- If using binaries from other providers, consult the provider documentation to determine if they support TLS/SSL. + +There are several ways to obtain a server certificate, including the following. For your convenience, step-by-step instructions are provided for the second, third, and fourth options. + +- If you already have an SSL/TLS certificate for NGINX or NGINX Plus installed on another UNIX or Linux system (including systems running Apache HTTP Server), copy it to the **/etc/nginx/ssl** directory on the NGINX Plus server. +- Generate a self‑signed certificate as described in [Generating a Self‑Signed Certificate with the openssl Command](#certificate-self-signed). 
This is sufficient for testing scenarios, but clients of production deployments generally require a certificate signed by a certificate authority (CA). +- Request a new certificate from a CA or your organization's security group, as described in [Generating a Certificate Request with the openssl Command](#certificate-request). +- If you already have an SSL/TLS certificate on a Windows system, see [Exporting and Converting an SSL/TLS Certificate from an IIS Server](#certificate-iis). + +For more details on SSL/TLS termination, see the [NGINX Plus Admin Guide]({{< relref "/nginx/admin-guide/security-controls/terminating-ssl-http" >}}). + + +### Generating a Self-Signed Certificate with the openssl Command + +Generate a public‑private key pair and a self‑signed server certificate in PEM format that is based on them. + +1. Log in as the root user on a machine that has the `openssl` software installed. + +2. Generate the key pair in PEM format (the default). To encrypt the private key, include the `-des3` parameter. (Other encryption algorithms are available, listed on the man page for the [genrsa](https://www.openssl.org/docs/manmaster/man1/openssl-genrsa.html) command.) You are prompted for the passphrase used as the basis for encryption. + + ```shell + root# openssl genrsa -des3 -out ~/private-key.pem 2048 + Generating RSA private key ... + Enter pass phrase for private-key.pem: + ``` + +3. Create a backup of the key file in a secure location. If you lose the key, the certificate becomes unusable. + + ```shell + root# cp ~/private-key.pem secure-dir/private-key.pem.backup + ``` + +4. Generate the certificate. Include the `-new` and `-x509` parameters to make a new self-signed certificate. Optionally include the `-days` parameter to change the key's validity lifetime from the default of 30 days (10950 days is about 30 years). Respond to the prompts with values appropriate for your testing deployment. 
+ + ```shell + root# openssl req -new -x509 -key ~/private-key.pem -out ~/self-cert.pem -days 10950 + ``` + +5. Copy or move the certificate file and associated key files to the **/etc/nginx/ssl** directory on the NGINX Plus server. + + (In the configuration file for a single Web Entry Point that you can download from the NGINX, Inc. website, the filenames for the certificate and private key are **server.crt** and **server.key**. For a discussion of the file and download instructions, see [Creating and Modifying Configuration Files](#config-files).) + + +### Generating a Certificate Request with the openssl Command + +1. Log in as the root user on a machine that has the `openssl` software installed. + +2. Create a private key to be packaged in the certificate. + + ```shell + root# openssl genrsa -out ~/company.com.key 2048 + ``` + +3. Create a backup of the key file in a secure location. If you lose the key, the certificate becomes unusable. + + ```shell + root# cp ~/company.com.key secure-dir/company.com.key.backup + ``` + +4. Create a Certificate Signing Request (CSR) file. + + ```shell + root# openssl req -new -sha256 -key ~/company.com.key -out ~/company.com.csr + ``` + +5. Request a certificate from a CA or your internal security group, providing the CSR file (**company.com.csr**). As a reminder, never share private keys (**.key** files) directly with third parties. + + The certificate needs to be PEM format rather than in the Windows‑compatible PFX format. If you request the certificate from a CA website yourself, choose NGINX or Apache (if available) when asked to select the server platform for which to generate the certificate. + +6. Copy or move the certificate file and associated key files to the **/etc/nginx/ssl** directory on the NGINX Plus server. + + (In the configuration file for a single Web Entry Point that you can download from the NGINX, Inc. website, the filenames for the certificate and private key are **server.crt** and **server.key**. 
For a discussion of the file and download instructions, see [Creating and Modifying Configuration Files](#config-files).) + + +### Exporting and Converting an SSL/TLS Certificate from an IIS Server + +On Windows systems, SSL/TLS certificates are packaged in a Public‑Key Cryptography Standards (PKCS) archive file with extension **.pfx**. You need to export the **.pfx** file and convert the contents to the Linux‑compatible PEM format. + +Working in the Microsoft Management Console, perform the following steps: + +1. Open the **Certificates** snap‑in. + +2. In the left‑hand navigation pane, click the **Certificates** folder in the logical store for the certificate you want to export (in the following figure, it is Personal > Certificates). + +3. In the main pane, right‑click the certificate to be exported (in the following figure, it is **cas01.company.com**). + +4. On the menu that pops up, select **All Tasks**, then click **Export**. + + ![Certificates snap-in to Microsoft Management Console, used to export SSL/TLS certificate](/nginx/images/oracle-ebs-iis-certlm.png) + +5. In the Certificate Export Wizard window that pops up, click **Yes, export the private key**. (This option appears only if the private key is marked as exportable and you have access to it.) + +6. If prompted for a password (used to encrypt the **.pfx** file before export), type it in the **Password** and **Confirm** fields. (Remember the password, as you need to provide it when importing the bundle to NGINX Plus.) + +7. Click **Next**. + +8. In the **File name** field, type the filename and path to the location for storing the exported file (certificate and private key). Click **Next**, then **Finish**. + +9. Copy the **.pfx** file to the NGINX Plus server. + +Working on the NGINX Plus server (which must have the `openssl` software installed), perform the following steps: + +1. Log in as the root user. + +2. Extract the private key file from the **.pfx** file. 
You are prompted first for the password protecting the **.pfx** file (see Step 6 above), then for a new password used to encrypt the private key file being created (**company.com.key.encrypted** in the following sample command).

    ```shell
    root# openssl pkcs12 -in exported-certs.pfx -nocerts -out company.com.key.encrypted
    ```

3. Decrypt the key file. At the prompt, type the password you created in the previous step for the private key file.

    ```shell
    root# openssl rsa -in company.com.key.encrypted -out company.com.key
    ```

4. Extract the certificate file.

    ```shell
    root# openssl pkcs12 -in exported-certs.pfx -clcerts -nokeys -out company.com.crt
    ```

5. Copy or move the certificate file and associated key files to the **/etc/nginx/ssl** directory on the NGINX Plus server.

    (In the configuration file for a single Web Entry Point that you can download from the NGINX, Inc. website, the filenames for the certificate and private key are **server.crt** and **server.key**. For a discussion of the file and download instructions, see [Creating and Modifying Configuration Files](#config-files).)


## Configuring Oracle EBS

For Oracle applications to work with a load balancer, you need to configure a Web Entry Point. For full instructions, refer to the Oracle documentation on configuring Web Entry points,
_Using Load‑Balancers with Oracle E‑Business Suite Release 12.2_ (MOS Doc ID 1375686.1).

Use the AutoConfig Context Editor to set the configuration values in the applications context file on application servers.
+ +Here are examples of appropriate values: + + +{{}} + +|Load Balancer Entry Point | store.company.com | +|Application Server 1 | apps-tier1.company.com | +|Application Server 2 | apps-tier2.company.com | +|Web Entry Protocol | https | +|Application Tier Web Protocol | http | +|Application Tier Web Port | 8000 | +|Active Web Port | 443 | + +{{}} + + + +## Configuring NGINX Plus for Oracle EBS + +The instructions in the following sections are required for NGINX Plus to load balance EBS servers properly. + +- [Creating and Modifying Configuration Files](#config-files) +- [Configuring Global Settings](#global-settings) +- [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers) +- [Setting the Default MIME Type](#mime-type) +- [Configuring Load Balancing](#load-balancing) +- [Configuring Session Persistence](#session-persistence) + +The instructions in these sections are optional, but improve the performance and manageability of your NGINX Plus deployment: + +- [Configuring HTTP/2 Support](#http2) +- [Configuring Application Health Checks](#health-checks) +- [Configuring Caching for Application Acceleration](#caching) +- [Configuring Advanced Logging and Monitoring](#logging-monitoring) +- [Configuring Backup Servers for Disaster Recovery](#backup-servers) +- [Configuring NGINX Plus for High Availability](#high-availability) + +Finally, if you need multiple Web Entry Points, see [Configuring Multiple Web Entry Points](#multiple-points). + + +### Creating and Modifying Configuration Files + +To reduce errors, this guide has you copy directives from files provided by NGINX into your configuration files, instead of using a text editor to type in the directives yourself. Then you go through the sections in this guide (starting with [Configuring Global Settings](#global-settings)) to learn how to modify the directives as required for your deployment. 
+ +As provided, there is one file for a single Web Entry Point and one file for multiple Web Entry Points. If you are installing and configuring NGINX Plus on a fresh Linux system and using it only to load balance EBS traffic, you can use the provided file as your main configuration file, which by convention is called **/etc/nginx/nginx.conf**. + +We recommend, however, that instead of a single configuration file you use the scheme that is set up automatically when you install an NGINX Plus package, especially if you already have an existing NGINX or NGINX Plus deployment or plan to expand your use of NGINX Plus to other purposes in future. In the conventional scheme, the main configuration file is still called **/etc/nginx/nginx.conf**, but instead of including all directives in it, you create separate configuration files for different functions and store the files in the **/etc/nginx/conf.d** directory. You then use the `include` directive in the appropriate contexts of the main file to read in the contents of the function‑specific files. + +To download the complete configuration file for a single Web Entry Point: + +```none +root# cd /etc/nginx/conf.d +root# curl https://www.nginx.com/resource/conf/oracle-single-entry-point.conf > oracle-single-entry-point.conf +``` + +To download the complete configuration file for multiple Web Entry Points: + +```none +root# cd /etc/nginx/conf.d +root# curl https://www.nginx.com/resource/conf/oracle-multiple-entry-point.conf > oracle-multiple-entry-point.conf +``` + +(You can also access the URL in a browser and download the file that way.) + +To set up the conventional configuration scheme, add an `http` configuration block in the main **nginx.conf** file, if it does not already exist. (The standard placement is below any global directives; see [Configuring Global Settings](#global-settings).) 
Add this `include` directive with the appropriate filename: + +```nginx +http { + include conf.d/oracle-(single|multiple)-entry-point.conf; +} +``` + +Directive documentation: [include](https://nginx.org/en/docs/ngx_core_module.html#include) + +You can also use wildcard notation to reference all files that pertain to a certain function or traffic type in the appropriate context block. For example, if you name all HTTP configuration files _function_-http.conf, this is an appropriate include directive: + +```nginx +http { + include conf.d/*-http.conf; +} +``` + +For reference purposes, the full configuration files are also provided in this document: + +- [Full Configuration for a Single Web Entry Point](#full-configuration-single) +- [Full Configuration for Multiple Entry Points](#full-configuration-multiple) + +We recommend, however, that you do not copy text directly from this document. It does not necessarily use the same mechanisms for positioning text (such as line breaks and white space) as text editors do. In text copied into an editor, lines might run together and indenting of child statements in configuration blocks might be missing or inconsistent. The absence of formatting does not present a problem for NGINX Plus, because (like many compilers) it ignores white space during parsing, relying solely on semicolons and curly braces as delimiters. The absence of white space does, however, make it more difficult for humans to interpret the configuration and modify it without making mistakes. + +#### About Reloading Updated Configuration + +We recommend that each time you complete a set of updates to the configuration, you run the `nginx` `-t` command to test the configuration file for syntactic validity. 
+ +```none +root# nginx -t +nginx: the configuration file /etc/nginx/nginx.conf syntax is ok +nginx: configuration file /etc/nginx/nginx.conf test is successful +``` + +To tell NGINX Plus to start using the new configuration, run one of the following commands: + +```none +root# nginx -s reload +``` + +or + +```none +root# service nginx reload +``` + + +### Configuring Global Settings + +Verify that the main **nginx.conf** file includes the following global directives, adding them as necessary. + +```nginx +user nginx; +worker_processes auto; +error_log /var/log/nginx/error.log info; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +# If using the standard configuration scheme, the 'http' block is usually placed here +# and encloses 'include' directives that refer to files in the conf.d directory. +``` + +Directive documentation: [error_log](https://nginx.org/en/docs/ngx_core_module.html#error_log), [pid](https://nginx.org/en/docs/ngx_core_module.html#pid), [user](https://nginx.org/en/docs/ngx_core_module.html#user), [worker_connections](https://nginx.org/en/docs/ngx_core_module.html#worker_connections), [worker_processes](https://nginx.org/en/docs/ngx_core_module.html#worker_processes) + + +### Configuring Virtual Servers for HTTP and HTTPS Traffic + +These directives define virtual servers for HTTP and HTTPS traffic in separate `server` blocks in the top‑level `http` configuration block. All HTTP requests are redirected to the HTTPS server. + +1. Configure a `server` block that listens for requests for **"https://company.com"** received on port 443. + + The required `ssl_certificate` and `ssl_certificate_key` directives name the certificate and private key files you created in [Configuring an SSL/TLS Certificate for Client Traffic](#tls-certificate). 
Here we use the filenames – **server.crt** and **server.key** – specified in the configuration file for a single Web Entry Point that we downloaded from the NGINX website in [Creating and Modifying Configuration Files](#config-files). + + ```nginx + # In the 'http' block + server { + listen 443 ssl; + server_name company.com; + + ssl_certificate /etc/nginx/ssl/server.crt; + ssl_certificate_key /etc/nginx/ssl/server.key; + ssl_protocols TLSv1.2; + } + ``` + + Directive documentation: [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen), [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server), [server_name](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_name), [ssl_certificate and ssl_certificate_key](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate), [ssl_protocols](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols) + + This server listens on every IP address. If needed, you can restrict listening to one or more IP addresses (IPv4 or IPv6). For example, with this `listen` directive the server listens on address 10.210.15.20 and port 443: + + ```nginx + listen 10.210.15.20:443 ssl; + ``` + +2. Configure a `server` block that permanently redirects requests received on port 80 for **"http://company.com"** to the HTTPS server defined in the previous step. Opening port 80 does not decrease security, because the requests to this port don't result in connections to your backend servers. 
+

    ```nginx
    # In the 'http' block
    server {
        listen 80;
        status_zone oracle-http-redirect;
        return 301 https://$http_host$request_uri;
    }
    ```

    Directive documentation: [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen), [return](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#return), [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server), [status_zone](https://nginx.org/en/docs/http/ngx_http_status_module.html#status_zone)

For more information on configuring SSL/TLS, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/security-controls/terminating-ssl-http.md" >}}) and the reference documentation for the HTTP [SSL/TLS](https://nginx.org/en/docs/http/ngx_http_ssl_module.html) module.


### Setting the Default MIME Type

In case the EBS server does not specify the MIME type of the data it is sending to the client (in the `Content-Type` response header), define the default MIME type as `text/html`. Include these directives in the `http` context:

```nginx
# In the 'http' block
include /etc/nginx/mime.types;
default_type text/html;
```

Directive documentation: [default_type](https://nginx.org/en/docs/http/ngx_http_core_module.html#default_type), [include](https://nginx.org/en/docs/ngx_core_module.html#include)


### Configuring Load Balancing

To configure load balancing, you first create a named _upstream group_, which lists your EBS app servers. You then set up NGINX Plus as a reverse proxy and load balancer by referring to the upstream group in one or more `proxy_pass` directives.

1. Configure an upstream group called **oracle** with two EBS application servers listening on port 8000, one on IP address 172.31.11.210 and the other on 172.31.0.146. Each upstream group name in the configuration must be unique.
+ + ```nginx + # In the 'http' block + upstream oracle { + zone oracle 64k; + server 172.31.11.210:8000 max_fails=0; + server 172.31.0.146:8000 max_fails=0; + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + + The `zone` directive creates a 64‑KB shared memory zone, also called **oracle**, for storing configuration and runtime state information about the group that is shared among worker processes. + + Add a `server` directive for each of your EBS app servers. You can identify servers by IP address or hostnames. If using hostnames, make sure that the operating system on the NGINX Plus server can resolve them. + + NGINX Plus supports two different kinds of application health checks, active and passive. We recommend configuring [application health checks](#health-checks) and disabling passive health checks by including the `max_fails=0` parameter on each `server` directive. + +2. In the `server` block for HTTPS traffic created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers), add a `location` block that proxies all traffic to the upstream group. + + ```nginx + # In the 'server' block for HTTPS traffic + location / { + proxy_pass http://oracle; + proxy_set_header Host $host; + } + ``` + + Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) + +By default, NGINX and NGINX Plus use the Round Robin algorithm for load balancing among servers. The load balancer runs through the list of servers in the upstream group in order, forwarding each new request to the next server. 
In our example, the first request goes to 172.31.11.210, the second to 172.31.0.146, the third to 172.31.11.210, and so on. For information about the other available load‑balancing algorithms, see the NGINX Plus Admin Guide.

For more information on proxying and load balancing, see [NGINX Reverse Proxy]({{< relref "../../admin-guide/web-server/reverse-proxy.md" >}}) and [HTTP Load Balancing]({{< relref "../../admin-guide/load-balancer/http-load-balancer.md" >}}) in the NGINX Plus Admin Guide, and the documentation for the HTTP [Proxy](https://nginx.org/en/docs/http/ngx_http_proxy_module.html) and [Upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html) modules.


### Configuring Session Persistence

EBS applications require session persistence. Without it, you will experience unexpected session logouts almost immediately after logging in. Oracle supports three methods for session persistence: active cookie, passive cookie, and IP address‑based.

For simplicity, configure active‑cookie session persistence with the NGINX Plus "sticky cookie" method. NGINX Plus adds a cookie called **ngxcookie** to every new user session, recording a hash of the backend server that was selected for the first request from the user. The cookie expires when the browser restarts.
+ +Add the `sticky cookie` directive to the `upstream` block created in [Configuring Load Balancing](#load-balancing), so the complete block looks like this: + + ```nginx + # In the 'http' block + upstream oracle { + zone oracle 64k; + server 172.31.11.210:8000 max_fails=0; + server 172.31.0.146:8000 max_fails=0; + sticky cookie ngxcookie; + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [`sticky cookie`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + + +### Configuring HTTP/2 Support + +HTTP/2 is fully supported in both NGINX 1.9.5 and later, and NGINX Plus R7 and later. As always, we recommend you run the latest version of software to take advantage of improvements and bug fixes. + +- If using open source NGINX, note that in version 1.9.5 and later the SPDY module is completely removed from the NGINX codebase and replaced with the [HTTP/2](https://nginx.org/en/docs/http/ngx_http_v2_module.html) module. After upgrading to version 1.9.5 or later, you can no longer configure NGINX to use SPDY. If you want to keep using SPDY, you need to compile NGINX from the sources in the [NGINX 1.8 branch](https://nginx.org/en/download.html). + +- If using NGINX Plus, in R11 and later the nginx-plus package supports HTTP/2 by default, and the nginx-plus-extras package available in previous releases is deprecated by separate [dynamic modules](https://www.nginx.com/products/nginx/modules/) authored by NGINX. + + In NGINX Plus R8 through R10, the nginx-plus and nginx-plus-extras packages support HTTP/2 by default. + + In NGINX Plus R8 and later, NGINX Plus supports HTTP/2 by default, and does not support SPDY. 
+ + If using NGINX Plus R7, you must install the nginx-plus-http2 package instead of the nginx-plus or nginx-plus-extras package. + +To enable HTTP/2 support, add the `http2` directive in the `server` block for HTTPS traffic that we created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers), so that it looks like this: + +```nginx +# In the 'server' block for HTTPS traffic +listen 443 ssl; +http2 on; +``` + +Directive documentation: [http2](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2) + +To verify that HTTP/2 translation is working, you can use the "HTTP/2 and SPDY indicator" plug‑in available for [Google Chrome](https://chrome.google.com/webstore/detail/http2-and-spdy-indicator/mpbpobfflnpcgagjijhmgnchggcjblin?hl=en) and [Firefox](https://addons.mozilla.org/en-US/firefox/addon/http2-indicator/). + + +### Configuring Application Health Checks + +By default, NGINX Open Source and NGINX Plus perform basic (_passive_) checks on responses from upstream servers, retrying failed requests where possible. NGINX Plus adds out-of-band application health checks (also known as _active health checks_ or _synthetic transactions_). The related slow‑start feature gradually ramps up traffic to servers in the load‑balanced group as they recover from a failure, allowing them to "warm up" without being overwhelmed. + +These features enable NGINX Plus to detect and work around a much wider variety of problems and have the potential to significantly improve the availability of your Oracle applications. + +We are configuring an active application health check to verify that the Oracle application returns the `X-ORACLE-DMS-ECID` header. If not, the health check fails and NGINX Plus doesn't send requests to the failed server. + +1. In the `http` context, include a `match` directive to define the tests that a server must pass to be considered functional. 
In this example, it must return a status code between `200` and `399` and the `X-ORACLE-DMS-ECID` header must be set. + + ```nginx + # In the 'http' block + match oracleok { + status 200-399; + header X-ORACLE-DMS-ECID; + } + ``` + + Directive documentation: [match](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#match) + +2. In the `server` block for HTTPS traffic created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers), add a new `location` block for the health check. + + ```nginx + # In the 'server' block for HTTPS traffic + location @health_check { + internal; + proxy_connect_timeout 3s; + proxy_read_timeout 3s; + proxy_pass http://oracle; + proxy_set_header Host "oracle.company.com"; + health_check match=oracleok interval=4s + uri=/OA_HTML/AppsLocalLogin.jsp; + } + ``` + + Directive documentation: [health_check](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check), [internal](https://nginx.org/en/docs/http/ngx_http_core_module.html#internal), [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_connect_timeout](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_connect_timeout), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [proxy_read_timeout](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout), [proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) + +Note that the `location` block is in the `server` block for HTTPS traffic, but the `match` block is in the `http` block. + +NGINX Plus also has a slow‑start feature that is a useful auxiliary to health checks. When a failed server recovers, or a new server is added to the upstream group, NGINX Plus slowly ramps up the traffic to it over a defined period of time. This gives the server time to "warm up" without being overwhelmed by more connections than it can handle as it starts up. 
For more information, see the NGINX Plus Admin Guide. + +For example, to set a slow‑start period of 30 seconds for your EBS application servers, include the `slow_start` parameter to their `server` directives: + +```nginx +# In the 'upstream' block +server 172.31.11.210:8000 slow_start=30s; +server 172.31.0.146:8000 slow_start=30s; +``` + +Parameter documentation: [slow_start](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#slow_start) + +For information about customizing health checks, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/load-balancer/http-health-check.md" >}}). + + +### Configuring Caching for Application Acceleration + +Caching of static objects like the following significantly improves the performance of Oracle EBS: + +- Images +- CSS files +- JavaScript files +- Java applets + +Before configuring caching, make sure that the NGINX Plus host has adequate free disk space and disk performance. SSDs are preferred for their superior performance, but standard spinning media can be used. + +1. Create a directory for cached files: + + ```shell + root@nginx# mkdir /var/oracle-cache + root@nginx# chown nginx /var/oracle-cache + ``` + +2. In the `http` context, define the path to the cache, the name (**cache_oracle**) and maximum size (50 MB) of the shared memory zone used for storing cache keys, and the maximum size of the cache itself (here, 500 MB). Adjust the size values as appropriate for the amount of free disk space on the NGINX Plus host. + + ```nginx + # In the 'http' block + proxy_cache_path /var/oracle-cache/ keys_zone=cache_oracle:50m max_size=500m; + ``` + + Directive documentation: [proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) + +3. In the `server` block for HTTPS traffic created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers), enable caching by defining the name of the shared memory zone for the cache (**cache_oracle**). 
+
+    Also add the `proxy_cache_valid` directive to the existing `location` block for `/` (slash). The `any` parameter specifies that all responses are cached, and the `1h` parameter that cached items expire after one hour.
+
+    ```nginx
+    # In the 'server' block for HTTPS traffic
+    proxy_cache cache_oracle;
+    location / {
+        proxy_pass http://oracle;
+        proxy_set_header Host $host;
+        proxy_cache_valid any 1h;
+    }
+    ```
+
+    Directive documentation: [proxy_cache](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache), [proxy_cache_valid](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_valid), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header)
+
+For more complete information on caching, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}) and the reference documentation for the HTTP [Proxy](https://nginx.org/en/docs/http/ngx_http_proxy_module.html) module.
+
+You can track cache usage using the following methods:
+
+- Statistics from the [NGINX Plus API](https://nginx.org/en/docs/http/ngx_http_api_module.html) module, displayed on the built‑in live activity monitoring dashboard, or fed to a custom or third‑party reporting tool
+- The NGINX Plus access log, when the log format includes the `$upstream_cache_status` variable
+
+For detailed configuration instructions, see the next section.
+
+
+### Configuring Advanced Logging and Monitoring
+
+NGINX Plus provides multiple ways to monitor your Oracle EBS installation, providing data about unavailable servers, failed health checks, response code statistics, and performance. In addition to its built‑in tools, NGINX Plus easily integrates into enterprise monitoring systems through industry‑standard protocols.
+ +- [Configuring Logging with a Custom Message Format](#logging-custom-format) +- [Configuring Logging with syslog](#logging-syslog) +- [Configuring Live Activity Monitoring](#live-activity-monitoring) +- [Monitoring with Third-Party Tools](#monitoring-third-party) + + +#### Configuring Logging with a Custom Message Format + +You can customize the format of messages written to the NGINX Plus access log to include more application‑specific information. Most system variables can be included in log messages. The predefined NGINX Plus **combined** format includes the following variables: + +- `$body_bytes_sent` – Number of bytes in the body of the response sent to the client +- `$http_user_agent` – `User-Agent` header in the client request +- `$http_referer` – `Referer` header in the client request +- `$remote_addr` – Client IP address +- `$remote_user` – Username provided for HTTP Basic authentication +- `$request` – Full original request line +- `$status` – Response status code +- `$time_local` – Local time in the Common Log Format + +You can access the complete list of NGINX Plus variables [here](https://nginx.org/en/docs/varindex.html). + +To make troubleshooting of our load‑balancing deployment easier, let's add the `$upstream_addr` variable (the address of the actual server generating the response) to the variables in the **combined** format. 
+ +Add the following `log_format` and `access_log` directives in the `http` context to enable access logging to **/var/log/nginx/access.log** and to define the message format: + +```nginx +# In the 'http' block +log_format main '$remote_addr - $remote_user [$time_local] + "$request" $status $body_bytes_sent "$http_referer" + "$http_user_agent" $upstream_addr'; +access_log /var/log/nginx/access.log main; +``` + +Directive documentation: [access_log](https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log), [log_format](https://nginx.org/en/docs/http/ngx_http_log_module.html#log_format) + +To disable logging for all HTTP traffic, for a virtual server, or for a location, include the following directive in the `http`, `server`, or `location` block respectively: + +```nginx +access_log off; +``` + +Note that the message format for error logs is predefined and cannot be changed. + + +#### Configuring Logging with syslog + +The `syslog` utility is a widely used standard for message logging. It is used in the backbone of many monitoring and log‑aggregation solutions. + +You can configure NGINX Plus to direct both error logs and access logs to `syslog` servers. These examples configure logging to a `syslog` server listening on IP address 192.168.1.1 and the default UDP port, 514. 
+ +To configure the error log, add the following `error_log` directive in the main context, the `http` context, or a `server` or `location` block: + +```nginx +# In the main, 'http', 'server', or 'location' block +error_log syslog:server=192.168.1.1 info; +``` + +Directive documentation: [error_log](https://nginx.org/en/docs/ngx_core_module.html#error_log) + +To configure the access log using the predefined **combined** format, add the following `access_log` directive in the `http` context (it appears on multiple lines here solely for formatting reasons: + +```nginx +access_log syslog:server=192.168.1.1,facility=local7,tag=oracle,severity=info + combined; +``` + +Directive documentation: [access_log](https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log) + +You can include multiple `error_log` and `access_log` directives in the same context. Messages are sent to every `syslog` server and file. + + +#### Configuring Live Activity Monitoring + +NGINX Plus includes a live activity monitoring interface that provides key load and performance metrics in real time, including TCP metrics in NGINX Plus R6 and later. Statistics are reported through a RESTful JSON interface, making it very easy to feed the data to a custom or third‑party monitoring tool. There is also a built‑in dashboard. Follow these instructions to deploy it. + +Dashboard tab in NGINX Plus live activity monitoring dashboard + +The quickest way to configure live activity monitoring and the built‑in dashboard is to download the sample configuration file from the NGINX website and modify it as necessary. For more complete instructions, see [Live Activity Monitoring of NGINX Plus in 3 Simple Steps](https://www.nginx.com/blog/live-activity-monitoring-nginx-plus-3-simple-steps/) on our blog. + +1. Download the **status.conf** file to the NGINX Plus server: + + ```none + # cd /etc/nginx/conf.d + # curl https://www.nginx.com/resource/conf/status.conf > status.conf + ``` + +2. 
Verify that the main configuration file (**/etc/nginx/nginx.conf**) has an `include` directive that reads the file into the `http` context. The following is specific to **status.conf**, but a wildcard version also works: + + ```nginx + # In the 'http' block in nginx.conf + include conf.d/status.conf; + ``` + + Directive documentation: [include](https://nginx.org/en/docs/ngx_core_module.html#include) + +3. Customize the file for your deployment as specified by comments in the file. In particular, the default settings in the file allow anyone on any network to access the dashboard. We strongly recommend that you restrict access to the dashboard with one or more of the following methods: + + - **IP address‑based access control lists (ACLs)**. In the sample configuration file, uncomment the `allow` and `deny` directives, and substitute the address of your administrative network for 10.0.0.0/8. Only users on the specified network can access the status page. + + ```nginx + allow 10.0.0.0/8; + deny all; + ``` + + Directive documentation: [allow and deny](https://nginx.org/en/docs/http/ngx_http_access_module.html) + + - **HTTP Basic authentication**. In the sample configuration file, uncomment the `auth_basic` and `auth_basic_user_file` directives and add user entries to the **/etc/nginx/users** file (for example, by using an [htpasswd generator](https://httpd.apache.org/docs/2.4/programs/htpasswd.html)). If you have an Apache installation, another option is to reuse an existing **htpasswd** file. + + ```nginx + auth_basic on; + auth_basic_user_file /etc/nginx/users; + ``` + + Directive documentation: [auth_basic and auth_basic_user_file](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html) + + - **Client certificates**, which are part of a complete configuration of SSL/TLS. 
For more information, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/security-controls/terminating-ssl-http.md" >}}) and the documentation for the HTTP [SSL/TLS](https://nginx.org/en/docs/http/ngx_http_ssl_module.html) module. + + - **Firewall**. Configure your firewall to disallow outside access to the port for the dashboard (8080 in the sample configuration file). + +4. In the `server` block for HTTPS traffic (created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers)), add this `status_zone` directive: + + ```nginx + # In the 'server' block for HTTPS traffic + status_zone oracle-ssl; + ``` + + Directive documentation: [status_zone](https://nginx.org/en/docs/http/ngx_http_status_module.html#status_zone) + +When you reload the NGINX Plus configuration file, for example by running the `nginx -s reload` command, the NGINX Plus dashboard is available immediately at http://_nginx-server-address_:8080. + +For more information about live activity monitoring, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/monitoring/live-activity-monitoring.md" >}}). + + +#### Monitoring with Third-Party Tools + +The NGINX Plus API exports metrics in JSON format, so you can feed them to many monitoring systems. In addition, the [Prometheus-njs]({{< relref "../../admin-guide/dynamic-modules/prometheus-njs.md" >}}) module converts the JSON output to a Prometheus‑compliant format. + +Several NGINX partners offer integrations for displaying and analyzing NGINX Open Source and NGINX Plus metrics. See our [Technology Partners](https://www.nginx.com/partners/technology-partners) page. + + +### Configuring Backup Servers for Disaster Recovery + +If you have backup EBS servers, either at the same physical location as your regular servers or at a disaster recovery site, you can include them in the configuration so that EBS continues to work even if all the primary EBS servers go down. 
+
+To configure backup servers, add `server` directives to the `upstream` block created in [Configuring Load Balancing](#load-balancing) and include the `backup` parameter. NGINX Plus does not forward traffic to them unless the primary servers all go down.
+
+```nginx
+# In the 'upstream' block
+server 172.33.111.210:8000 max_fails=0 backup;
+server 172.33.100.146:8000 max_fails=0 backup;
+```
+
+Parameter documentation: [backup](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#backup)
+
+You can then use a DNS‑based global load‑balancing solution to secure against site‑level failures.
+
+
+### Configuring NGINX Plus for High Availability
+
+To increase the reliability of your EBS deployment even more, configure a highly available (HA) deployment of NGINX Plus.
+
+For configuration instructions for on‑premises deployments of NGINX Plus, see the [NGINX Plus Admin Guide]({{< relref "/nginx/admin-guide/high-availability/" >}}).
+
+For configuration instructions for cloud‑based deployments of NGINX Plus, see our deployment guides:
+
+- [Active-Active HA for NGINX Plus on AWS Using AWS Network Load Balancer]({{< relref "../amazon-web-services/high-availability-network-load-balancer.md" >}})
+- [Active-Passive HA for NGINX Plus on AWS Using Elastic IP Addresses]({{< relref "../amazon-web-services/high-availability-keepalived.md" >}})
+- [All-Active HA for NGINX Plus on the Google Cloud Platform]({{< relref "../google-cloud-platform/high-availability-all-active.md" >}})
+
+For other cloud environments, refer to the documentation provided by your cloud vendor.
+ +We recommend that you use the integrated cloud tools as simple high‑availability solutions and let NGINX Plus perform more sophisticated operations: + +- Security +- SSL/TLS termination +- Advanced request routing +- Health checks +- Session persistence +- Monitoring +- Caching + + +### Configuring Multiple Web Entry Points + +The preceding sections of this document, starting with [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers), describe how to configure NGINX Plus load balancing for a single Web Entry Point. + +You might need to configure multiple Web Entry Points through the same load balancer, for reasons like the following: + +- Access from your internal network vs. externally available servers +- Access by different groups of users (employees, partners, customers) +- Access with different networking requirements (for example, a multihop DMZ configuration) + +If you need multiple Web Entry Points, then for each one you must: + +- Add a separate [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) block for each set of app servers +- Add a separate [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server) block for each load balancer entry point +- Ensure that each [shared memory zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) has a unique name +- Include the [server_name](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) directive in every `server` block +- Change the IP addresses in [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen) directives from wildcard values to specific addresses, if needed +- Provide additional SSL/TLS certificate files if not using UCC or wildcard certificates + +For a sample configuration, see [Full Configuration for Multiple Web Entry Points](#full-configuration-multiple). 
+ + +## Full Configuration Files + +For your convenience, the configuration files in this section include all directives discussed in this guide. It is intended for reference. As explained in [About Sample Values and Copying of Text](#sample-values), we recommend that you do not copy text from this document into configuration files, because it might include unwanted link text and not include whitespace and other formatting that makes the configuration easy to read. Instead, download the appropriate file from the NGINX, Inc. website as described in [Creating and Modifying Configuration Files](#config-files). + +Note that these configuration files contain sample values that you need to change for your deployment. + + +### Full Configuration for a Single Web Entry Point + +```nginx +user nginx; +worker_processes auto; +error_log /var/log/nginx/error.log info; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type text/html; + proxy_cache_path /var/oracle-cache keys_zone=cache_oracle:50m max_size=500m; + + # Custom logging configuration + log_format main '$remote_addr - $remote_user [$time_local] + "$request" $status $body_bytes_sent "$http_referer" + "$http_user_agent" $upstream_addr'; + access_log /var/log/nginx/access.log main; + + upstream oracle { + zone oracle 64k; + + # Production servers + server 172.31.11.210:8000 max_fails=0; + server 172.31.0.146:8000 max_fails=0; + + # Disaster recovery servers + server 172.33.111.210:8000 max_fails=0 backup; + server 172.33.100.146:8000 max_fails=0 backup; + + # Session persistence + sticky cookie ngxcookie; + } + + server { + listen 80; + status_zone oracle-http-redirect; + return 302 https://$http_host$request_uri; + } + + server { + listen 443 ssl; + http2 on; + server_name company.com; + ssl_certificate /etc/nginx/ssl/certificate-name.crt; + ssl_certificate_key /etc/nginx/ssl/private-key.key; + ssl_protocols TLSv1.2; + status_zone oracle-ssl; + 
proxy_cache cache_oracle; + + location / { + proxy_pass http://oracle; + proxy_set_header Host $host; + proxy_cache_valid any 1h; + } + + location @health_check { + internal; + proxy_connect_timeout 3s; + proxy_read_timeout 3s; + proxy_pass http://oracle; + proxy_set_header Host "oracle.company.com"; + health_check match=oracleok interval=4s + uri=/OA_HTML/AppsLocalLogin.jsp; + } + } + + match oracleok { + status 200-399; + header X-ORACLE-DMS-ECID; + } + + # Live activity monitoring configuration + server { + # Status zone required for live activity monitoring. Enable it for + # every 'server' block in other configuration files. + status_zone status-page; + + # If NGINX Plus is listening on multiple IP addresses, uncomment this + # directive to restrict access to the live activity monitoring + # dashboard to a single IP address (substitute the appropriate + # address). + # listen 10.2.3.4:8080; + # Live activity monitoring is enabled on port 8080 by default. + listen 8080; + + # HTTP Basic authentication is enabled by default. Use an htpasswd + # generator to add users, or command-line and other management tools are + # readily available online. If you have Apache HTTP Server installed, you + # can reuse its htpasswd file. + #auth_basic on; + #auth_basic_user_file /etc/nginx/users; + + # Limit access to the dashboard to users on admin networks only. + # Uncomment the "allow" directive and change the IP address. + #allow 10.0.0.0/8; + deny all; + + # Enable the NGINX Plus API for metrics collection. + location /api { + api write=on; + access_log off; + } + + # NGINX Plus includes a built-in dashboard. + location = /dashboard.html { + root /usr/share/nginx/html; + } + + # Redirect requests made to the pre-R14 dashboard. + location = /status.html { + return 301 /dashboard.html; + } + + # Standard HTTP features are fully supported with the dashboard. + # Redirect request for '/' to '/dashboard.html'. 
+ location = / { + return 301 /dashboard.html; + } + } +} +``` + + +### Full Configuration for Multiple Web Entry Points + +This configuration is for two Web Entry Points with the following settings: + + +{{}} + +| | **Web Entry Point 1** | **Web Entry Point 2** | +| ---| ---| --- | +|Domain name | **oracle-one.company.com** | **oracle-two.company.com** | +|SSL/TLS certificate and key | **server_one.crt** & **server_one.key** | **server_two.crt** & **server_two.key** | +|Status zone | **oracle-ssl-one** | **oracle-ssl-two** | +|Cache zone | **cache_oracle_one** | **cache_oracle_two** | +|Upstream name | **oracle_one** | **oracle_two** | +|EBS servers | 172.31.11.210 & 172.31.0.146 | 172.31.11.211 & 172.31.0.147 | +|Backup (DR) EBS servers | 172.33.111.210 & 172.33.100.146 | 172.33.111.211 & 172.33.100.147 | + +{{}} + + +```nginx +user nginx; +worker_processes auto; +error_log /var/log/nginx/error.log info; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type text/html; + proxy_cache_path /var/oracle-cache-one + keys_zone=cache_oracle_one:50m max_size=500m; + proxy_cache_path /var/oracle-cache-two + keys_zone=cache_oracle_two:50m max_size=500m; + + # Custom logging configuration + log_format main '$remote_addr - $remote_user [$time_local] + "$request" $status $body_bytes_sent "$http_referer" + "$http_user_agent" $upstream_addr'; + access_log /var/log/nginx/access.log main; + + upstream oracle_one { + zone oracle_one 64k; + + # Production servers + server 172.31.11.210:8000 max_fails=0; + server 172.31.0.146:8000 max_fails=0; + + # Disaster recovery servers + server 172.33.111.210:8000 max_fails=0 backup; + server 172.33.100.146:8000 max_fails=0 backup; + + # Session persistence + sticky cookie ngxcookie; + } + + upstream oracle_two { + zone oracle_two 64k; + + # Production servers + server 172.31.11.211:8000 max_fails=0; + server 172.31.0.147:8000 max_fails=0; + + # Disaster recovery servers + 
server 172.33.111.211:8000 max_fails=0 backup; + server 172.33.100.147:8000 max_fails=0 backup; + + # Session persistence + sticky cookie ngxcookie; + } + + server { + listen 80; + status_zone oracle-http-redirect; + return 302 https://$http_host$request_uri; + } + + server { + listen 192.168.210.10:443 ssl; + http2 on; + server_name oracle-one.company.com; + ssl_certificate /etc/nginx/ssl/server_one.crt; + ssl_certificate_key /etc/nginx/ssl/server_one.key; + ssl_protocols TLSv1.2; + status_zone oracle-ssl-one; + proxy_cache cache_oracle_one; + + location / { + proxy_pass http://oracle_one; + proxy_set_header Host $host; + proxy_cache_valid any 1h; + } + + location @health_check { + internal; + proxy_connect_timeout 3s; + proxy_read_timeout 3s; + proxy_pass http://oracle_one; + proxy_set_header Host "oracle-one.company.com"; + health_check match=oracleok interval=4s + uri=/OA_HTML/AppsLocalLogin.jsp; + } + } + + server { + listen 192.168.210.11:443 ssl; + http2 on; + server_name oracle-two.company.com; + ssl_certificate /etc/nginx/ssl/server_two.crt; + ssl_certificate_key /etc/nginx/ssl/server_two.key; + ssl_protocols TLSv1.2; + status_zone oracle-ssl-two; + proxy_cache cache_oracle_two; + + location / { + proxy_pass http://oracle_two; + proxy_set_header Host $host; + proxy_cache_valid any 1h; + } + + location @health_check { + internal; + proxy_connect_timeout 3s; + proxy_read_timeout 3s; + proxy_pass http://oracle_two; + proxy_set_header Host "oracle-two.company.com"; + health_check match=oracleok interval=4s + uri=/OA_HTML/AppsLocalLogin.jsp; + } + } + + match oracleok { + status 200-399; + header X-ORACLE-DMS-ECID; + } + + # Live activity monitoring configuration + server { + # Status zone required for live activity monitoring. Enable it for + # every 'server' block in other configuration files. 
+ status_zone status-page; + + # If NGINX Plus is listening on multiple IP addresses, uncomment this + # directive to restrict access to the live activity monitoring + # dashboard to a single IP address (substitute the appropriate + # address). + # listen 10.2.3.4:8080; + # Live activity monitoring is enabled on port 8080 by default. + listen 8080; + + # HTTP Basic authentication is enabled by default. Use an htpasswd + # generator to add users, or command-line and other management tools are + # readily available online. If you have Apache HTTP Server installed, you + # can reuse its htpasswd file. + #auth_basic on; + #auth_basic_user_file /etc/nginx/users; + + # Limit access to the dashboard to users on admin networks only. + # Uncomment the "allow" directive and change the IP address. + #allow 10.0.0.0/8; + deny all; + + # Enable the NGINX Plus API for metrics collection. + location /api { + api write=on; + access_log off; + } + + # NGINX Plus includes a built-in dashboard. + location = /dashboard.html { + root /usr/share/nginx/html; + } + + # Redirect requests made to the pre-R14 dashboard. + location = /status.html { + return 301 /dashboard.html; + } + + # Standard HTTP features are fully supported with the dashboard. + # Redirect request for '/' to '/dashboard.html'. 
+
+        location = / {
+            return 301 /dashboard.html;
+        }
+    }
+}
+
+```
+
+
+## Resources
+
+- [NGINX Plus Overview](https://www.nginx.com/products/nginx)
+- [NGINX Plus Admin Guide]({{< relref "/nginx/admin-guide/_index.md" >}})
+- [NGINX Wiki](https://www.nginx.com/resources/wiki/)
+
+### Revision History
+
+- Version 5 (May 2024) – Update about HTTP/2 support (the [http2](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2) directive)
+- Version 4 (October 2019) – Update information about third-party monitoring tools (NGINX Plus R19)
+- Version 3 (April 2018) – Updated information about high availability and the NGINX Plus API (NGINX Plus R13, NGINX Open Source 1.13.4)
+- Version 2 (July 2017) – Update about HTTP/2 support (NGINX Plus R11 and later)
+- Version 1 (November 2015) – Initial version (NGINX Plus R7, NGINX 1.9.5)
diff --git a/content/nginx/deployment-guides/load-balance-third-party/oracle-weblogic-server.md b/content/nginx/deployment-guides/load-balance-third-party/oracle-weblogic-server.md
new file mode 100644
index 000000000..fd0ee147f
--- /dev/null
+++ b/content/nginx/deployment-guides/load-balance-third-party/oracle-weblogic-server.md
@@ -0,0 +1,931 @@
+---
+description: Load balance Oracle WebLogic Servers with NGINX Open Source and the advanced
+  features in F5 NGINX Plus, following our step-by-step setup instructions.
+docs: DOCS-455
+doctypes:
+- task
+title: Load Balancing Oracle WebLogic Server with NGINX Open Source and NGINX Plus
+toc: true
+weight: 100
+---
+
+This deployment guide explains how to use NGINX Open Source and F5 NGINX Plus to load balance HTTP and HTTPS traffic across a pool of Oracle WebLogic Server application servers. The detailed instructions in this guide apply to both cloud‑based and on‑premises deployments of Oracle WebLogic Server.
+ + + +## About NGINX Open Source and NGINX Plus + +[NGINX Open Source](https://nginx.org/en) is an open source web server and reverse proxy that has grown in popularity in recent years because of its scalability, outstanding performance, and small footprint. NGINX Open Source was first created to solve the C10K problem (serving 10,000 simultaneous connections on a single web server). NGINX Open Source's features and performance have made it a staple of high‑performance sites – it's [the #1 web server at the 100,000 busiest websites in the world](https://w3techs.com/technologies/cross/web_server/ranking). + +[NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) is the commercially supported version of NGINX Open Source. NGINX Plus is a complete application delivery platform, extending the power of NGINX Open Source with a host of enterprise‑ready capabilities that enhance an Oracle WebLogic Server deployment and are instrumental to building web applications at scale: + +- [Full‑featured HTTP, TCP, and UDP load balancing](https://www.nginx.com/products/nginx/load-balancing/) +- [Intelligent session persistence](https://www.nginx.com/products/nginx/load-balancing/#session-persistence) +- [High‑performance reverse proxy]({{< relref "../../admin-guide/web-server/reverse-proxy.md" >}}) +- [Caching and offload of dynamic and static content]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}) +- [Adaptive streaming to deliver audio and video to any device](https://www.nginx.com/products/nginx/streaming-media/) +- [Application-aware health checks](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/) and [high availability](https://docs.nginx.com/nginx/admin-guide/high-availability/) +- [Advanced activity monitoring available via a dashboard or API](https://www.nginx.com/products/nginx/live-activity-monitoring/) +- [Management and real‑time configuration changes with DevOps‑friendly 
tools](https://www.nginx.com/products/nginx/load-balancing/#load-balancing-api) + + +## About Oracle WebLogic Server and Oracle Java Cloud Service + +Oracle WebLogic Server is one of the industry's leading application servers for building and deploying Java Enterprise Edition Platform ([Java EE](https://www.oracle.com/java/technologies/java-ee-glance.html)) applications, with features for lowering cost of operations, improving performance, enhancing scalability, and supporting the [Oracle Applications](https://www.oracle.com/applications/) portfolio. + +We tested the procedures in this guide against [Oracle WebLogic Server Standard Edition 12cR2 (12.2.1)](https://www.oracle.com/middleware/technologies/fusionmiddleware-downloads.html), but the instructions also apply to WebLogic Server Enterprise Edition and WebLogic Suite. For more information about Oracle WebLogic server products, see the [Oracle website](https://www.oracle.com/middleware/weblogic/). + +Oracle WebLogic Server is also available on [Oracle Cloud](https://www.oracle.com/index.html) as a service called [Oracle Java Cloud Service](https://www.oracle.com/application-development/cloud-services/java/), with a variety of general‑purpose and high‑memory shapes and with full administrative control. + +You can use the WebLogic Server Administration Control graphical user interface to deploy and undeploy an application to an Oracle Java Cloud Service instance, just as you would deploy and undeploy the application to an on‑premises service instance. + +For more information about deploying a Java application on Oracle Java Cloud Service, see [Administering Oracle Java Cloud Service](https://docs.oracle.com/en/cloud/paas/java-cloud/jscug/toc.htm). + + +## Prerequisites and System Requirements + +- An Oracle WebLogic Server application server installed and configured on a physical or virtual system. +- A Linux system to host NGINX Open Source or NGINX Plus. 
To avoid potential conflicts with other applications, we recommend you install NGINX Plus on a fresh physical or virtual system. For the list of Linux distributions supported by NGINX Plus, see [NGINX Plus Technical Specifications]({{< relref "../../technical-specs.md" >}}). +- NGINX Open Source 1.9.5 and later, or NGINX Plus R7 and later. + +The instructions assume you have basic Linux system administration skills, including the following. Full instructions are not provided for these tasks. + +- Deploying and configuring a WebLogic Server application +- Installing Linux software from vendor‑supplied packages +- Editing configuration files +- Copying files between a central administrative system and Linux servers +- Running basic commands to start and stop services +- Reading log files + +### About Sample Values and Copying of Text + +- `example.com` is used as a sample domain name (in key names and configuration blocks). Replace it with your organization's name. +- Many NGINX Open Source and NGINX Plus configuration blocks in this guide list two sample WebLogic Server application servers with IP addresses 192.168.25.33 and 192.168.25.69. Replace these addresses with the IP addresses of your WebLogic Server servers. Include a line in the configuration block for each server if you have more or fewer than two. +- For readability reasons, some commands appear on multiple lines. If you want to copy and paste them into a terminal window, we recommend that you first copy them into a text editor, where you can substitute the object names that are appropriate for your deployment and remove any extraneous formatting characters that your browser might insert. +- Some of the examples in this guide are partial and require additional directives or parameters to be complete. You can download complete configuration files for basic and enhanced load balancing from the NGINX website, as instructed in [Creating and Modifying Configuration Files](#config-files). 
For details about a specific directive or parameter, see the [NGINX reference documentation](https://nginx.org/en/docs/). +- We recommend that you do not copy text from the configuration snippets in this guide into your configuration files. For the recommended way to create configuration files, see [Creating and Modifying Configuration Files](#config-files). + + +## Configuring an SSL/TLS Certificate for Client Traffic + +If you plan to enable SSL/TLS encryption of traffic between NGINX Open Source or NGINX Plus and clients of your WebLogic Server application, you need to configure a server certificate for NGINX Open Source or NGINX Plus. + +- SSL/TLS support is enabled by default in all [NGINX Plus packages](https://account.f5.com/myf5) and [NGINX Open Source binaries](https://nginx.org/en/linux_packages.html) provided by NGINX. +- If you are compiling NGINX Open Source from source, include the `--with-http_ssl_module` parameter to enable SSL/TLS support for HTTP traffic (the corresponding parameter for TCP is `--with-stream_ssl_module`, and for email is `--with-mail_ssl_module`, but this guide does not cover either of those protocol types). +- If using binaries from other providers, consult the provider documentation to determine if they support SSL/TLS. + +There are several ways to obtain a server certificate, including the following. For your convenience, step-by-step instructions are provided for the second and third options. + +- If you already have an SSL certificate for NGINX Open Source or NGINX Plus installed on another UNIX or Linux system (including systems running Apache HTTP Server), copy it to the **/etc/nginx/ssl** directory on the NGINX Open Source or NGINX Plus server. +- Generate a self‑signed certificate as described in [Generating a Self‑Signed Certificate](#certificate-self-signed) below. This is sufficient for testing scenarios, but clients of production deployments generally require a certificate signed by a certificate authority (CA). 
+- Request a new certificate from a CA or your organization's security group, as described in [Generating a Certificate Request](#certificate-request) below. + +For more details on SSL/TLS termination, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/security-controls/terminating-ssl-http.md" >}}). + + +### Generating a Self-Signed Certificate + +Generate a public‑private key pair and a self‑signed server certificate in PEM format that is based on them. + +1. Log in as the root user on a machine that has the `openssl` software installed. + +2. Generate the key pair in PEM format (the default). To encrypt the private key, include the `-des3` parameter. (Other encryption algorithms are available, listed on the man page for the [genrsa](https://www.openssl.org/docs/manmaster/man1/openssl-genrsa.html) command.) You are prompted for the passphrase used as the basis for encryption. + + ```shell + root# openssl genrsa -des3 -out ~/private-key.pem 2048 + Generating RSA private key ... + Enter pass phrase for private-key.pem: + ``` + +3. Create a backup of the key file in a secure location. If you lose the key, the certificate becomes unusable. + + ```shell + root# cp ~/private-key.pem secure-dir/private-key.pem.backup + ``` + +4. Generate the certificate. Include the `-new` and `-x509` parameters to make a new self-signed certificate. Optionally include the `-days` parameter to change the key's validity lifetime from the default of 30 days (10950 days is about 30 years). Respond to the prompts with values appropriate for your testing deployment. + + ```shell + root# openssl req -new -x509 -key ~/private-key.pem -out ~/self-cert.pem -days 10950 + ``` + +5. Copy or move the certificate file and associated key files to the **/etc/nginx/ssl** directory on the NGINX Plus server. + + +### Generating a Certificate Request + +1. Log in as the root user on a machine that has the `openssl` software installed. + +2. 
Create a private key to be packaged in the certificate. + + ```shell + root# openssl genrsa -out ~/example.com.key 2048 + ``` + +3. Create a backup of the key file in a secure location. If you lose the key, the certificate becomes unusable. + + ```shell + root# cp ~/example.com.key secure-dir/example.com.key.backup + ``` + +4. Create a Certificate Signing Request (CSR) file. + + ```shell + root# openssl req -new -sha256 -key ~/example.com.key -out ~/example.com.csr + ``` + +5. Request a certificate from a CA or your internal security group, providing the CSR file (**example.com.csr**). As a reminder, never share private keys (**.key** files) directly with third parties. + + The certificate needs to be in PEM format rather than in the Windows‑compatible PFX format. If you request the certificate from a CA website yourself, choose NGINX or Apache (if available) when asked to select the server platform for which to generate the certificate. + +6. Copy or move the certificate file and associated key files to the **/etc/nginx/ssl** directory on the NGINX Plus server. + + +## Creating and Modifying Configuration Files + +To reduce errors, this guide has you copy directives from files provided by NGINX into your configuration files, instead of using a text editor to type in the directives yourself. Then you go through the sections in this guide (starting with [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers)) to learn how to modify the directives as required for your deployment. + +As provided, there is one file for basic load balancing (with NGINX Open Source or NGINX Plus) and one file for enhanced load balancing (with NGINX Plus). If you are installing and configuring NGINX Open Source or NGINX Plus on a fresh Linux system and using it only to load balance WebLogic Server traffic, you can use the provided file as your main configuration file, which by convention is called **/etc/nginx/nginx.conf**.
+ +We recommend, however, that instead of a single configuration file you use the scheme that is set up automatically when you install an NGINX Plus package, especially if you already have an existing NGINX Open Source or NGINX Plus deployment or plan to expand your use of NGINX Open Source or NGINX Plus to other purposes in future. In the conventional scheme, the main configuration file is still called **/etc/nginx/nginx.conf**, but instead of including all directives in it, you create separate configuration files for different functions and store the files in the **/etc/nginx/conf.d** directory. You then use the `include` directive in the appropriate contexts of the main file to read in the contents of the function‑specific files. + +To download the complete configuration file for basic load balancing: + +```none +root# cd /etc/nginx/conf.d +root# curl https://www.nginx.com/resource/conf/weblogic-basic.conf > weblogic-basic.conf +``` + +To download the complete configuration file for enhanced load balancing: + +```none +root# cd /etc/nginx/conf.d +root# curl https://www.nginx.com/resource/conf/weblogic-enhanced.conf > weblogic-enhanced.conf +``` + +(You can also access the URL in a browser and download the file that way.) + +To set up the conventional configuration scheme, add an `http` configuration block in the main **nginx.conf** file, if it does not already exist. (The standard placement is below any global directives.) Add this `include` directive with the appropriate filename: + +```nginx +http { + include conf.d/weblogic-(basic|enhanced).conf; +} +``` + +You can also use wildcard notation to reference all files that pertain to a certain function or traffic type in the appropriate context block. 
For example, if you name all HTTP configuration files _function_-http.conf, this is an appropriate include directive: + +```nginx +http { + include conf.d/*-http.conf; +} +``` + +For reference purposes, the full configuration files are also provided in this document: + +- [Full Configuration for Basic Load Balancing](#full-configuration-basic) +- [Full Configuration for Enhanced Load Balancing](#full-configuration-enhanced) + +We recommend, however, that you do not copy text directly from this document. It does not necessarily use the same mechanisms for positioning text (such as line breaks and white space) as text editors do. In text copied into an editor, lines might run together and indenting of child statements in configuration blocks might be missing or inconsistent. The absence of formatting does not present a problem for NGINX Open Source or NGINX Plus, because (like many compilers) they ignore white space during parsing, relying solely on semicolons and curly braces as delimiters. The absence of white space does, however, make it more difficult for humans to interpret the configuration and modify it without making mistakes. + +### About Reloading Updated Configuration + +We recommend that each time you complete a set of updates to the configuration, you run the `nginx` `-t` command to test the configuration file for syntactic validity. + +```none +root# nginx -t +nginx: the configuration file /etc/nginx/nginx.conf syntax is ok +nginx: configuration file /etc/nginx/nginx.conf test is successful +``` + +To tell NGINX Plus to start using the new configuration, run one of the following commands: + +```none +root# nginx -s reload +``` + +or + +```none +root# service nginx reload +``` + + +## Configuring Basic Load Balancing with NGINX Open Source or NGINX Plus + +This section explains how to set up NGINX Open Source or NGINX Plus as a load balancer in front of two WebLogic Server servers. 
The instructions in the first two sections are mandatory: + +- [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers) +- [Configuring Basic Load Balancing](#load-balancing-basic) + +The instructions in the remaining sections are optional, depending on the requirements of your application: + +- [Configuring Basic Session Persistence](#session-persistence-basic) +- [Configuring Proxy of WebSocket Traffic](#websocket) +- [Configuring Content Caching](#caching) +- [Configuring HTTP/2 Support](#http2) + +The complete configuration file appears in [Full Configuration for Basic Load Balancing](#full-configuration-basic). + +If you are using NGINX Plus, you can configure additional enhanced features after you complete the configuration of basic load balancing. See [Configuring Enhanced Load Balancing with NGINX Plus](#enhanced). + + +### Configuring Virtual Servers for HTTP and HTTPS Traffic + +These directives define virtual servers for HTTP and HTTPS traffic in separate `server` blocks in the top‑level `http` configuration block. All HTTP requests are redirected to the HTTPS server. + +1. Configure a `server` block that listens for requests for **"https://example.com"** received on port 443. + + The `ssl_certificate` and `ssl_certificate_key` directives are required; substitute the names of the certificate and private key you chose in [Configuring an SSL/TLS Certificate for Client Traffic](#tls-certificate). Here we use the filenames – **server.crt** and **server.key** – specified in the configuration file that we downloaded from the NGINX website in [Creating and Modifying Configuration Files](#config-files). + + The other directives are optional but recommended. 
+ + ```nginx + # In the 'http' block + server { + listen 443 ssl; + server_name example.com; + + ssl_certificate /etc/nginx/ssl/server.crt; + ssl_certificate_key /etc/nginx/ssl/server.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + } + ``` + + Directive documentation: [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen), [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server), [server_name](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_name), [ssl_certificate and ssl_certificate_key](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate), [ssl_prefer_server_ciphers](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_prefer_server_ciphers), [ssl_session_cache](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache) + +2. Configure a `server` block that permanently redirects requests received on port 80 for **"http://example.com"** to the HTTPS server defined in the previous step. + + If you're not using SSL for client connections, omit the `location` block. When instructed in the remainder of this guide to add directives to the `server` block for HTTPS traffic, add them to this block instead. 
+ + ```nginx + # In the 'http' block + server { + listen 80; + server_name example.com; + + # Redirect all HTTP requests to HTTPS + location / { + return 301 https://$server_name$request_uri; + } + } + ``` + + Directive documentation: [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen), [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [return](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#return), [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server), [server_name](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) + +For more information on configuring SSL/TLS, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/security-controls/terminating-ssl-http.md" >}}) and the reference documentation for the HTTP [SSL/TLS](https://nginx.org/en/docs/http/ngx_http_ssl_module.html) module. + + +### Configuring Basic Load Balancing + +To configure load balancing, you first create a named _upstream group_, which lists your backend servers. You then set up NGINX Open Source or NGINX Plus as a reverse proxy and load balancer by referring to the upstream group in one or more `proxy_pass` directives. + +By putting NGINX Open Source or NGINX Plus in front of WebLogic Server servers, you gain a number of benefits in concurrency, resiliency, and scalability, and can take advantage of NGINX's Layer 7 routing, SSL offloading, content caching, and other features. You can read more about those functions and features in [Reverse Proxy Using NGINX Plus](https://www.nginx.com/blog/reverse-proxy-using-nginx-plus/). + +1. Configure an _upstream group_ called **weblogic** with two WebLogic Server application servers listening on port 7001, one on IP address 192.168.25.33 and the other on 192.168.25.69. + + Note that this block applies only to HTTP and HTTPS traffic, because we're placing it in the `http` block. 
For information about load balancing WebSocket traffic, see [Configuring Proxy of WebSocket Traffic](#websocket). + + ```nginx + # In the 'http' block + upstream weblogic { + server 192.168.25.33:7001; + server 192.168.25.69:7001; + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + +2. In the `server` block for HTTPS traffic that we created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers), include two `location` blocks: + + - The first one matches HTTPS requests in which the path starts with /weblogic-app/, and proxies them to the **weblogic** upstream group we created in the previous step. + + - The second one funnels all traffic to the first `location` block, by doing a temporary redirect of all requests for **"http://example.com/"**. + + ```nginx + # In the 'server' block for HTTPS traffic + location /weblogic-app/ { + proxy_pass http://weblogic; + } + + location = / { + return 302 /weblogic-app/; + } + ``` + + Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [return](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#return) + +By default, NGINX Open Source and NGINX Plus use the Round Robin algorithm for load balancing among servers. The load balancer runs through the list of servers in the upstream group in order, forwarding each new request to the next server. In our example, the first request goes to 192.168.25.33, the second to 192.168.25.69, the third to 192.168.25.33, and so on. For information about the other available load‑balancing algorithms, see [Load‑Balancing Methods](https://www.nginx.com/products/nginx/load-balancing/#load-balancing-methods). 
+ +In NGINX Plus, you can also set up dynamic reconfiguration of an upstream group when the set of backend servers changes, using DNS or an API; see [Enabling Dynamic Reconfiguration of Upstream Groups](#reconfiguration). + +For more information on proxying and load balancing, see [NGINX Reverse Proxy]({{< relref "../../admin-guide/web-server/reverse-proxy.md" >}}) and [HTTP Load Balancing]({{< relref "../../admin-guide/load-balancer/http-load-balancer.md" >}}) in the NGINX Plus Admin Guide, and the reference documentation for the HTTP [Proxy](https://nginx.org/en/docs/http/ngx_http_proxy_module.html) and [Upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html) modules. + + +### Configuring Basic Session Persistence + +If your application requires basic session persistence (also known as _sticky sessions_), you can implement it in NGINX Open Source by using the IP Hash load‑balancing algorithm. (NGINX Plus offers a more sophisticated form of session persistence, as described in [Configuring Advanced Session Persistence](#session-persistence-advanced).) + +With the IP Hash algorithm, for each request a hash based on the client's IP address is calculated and associated with one of the upstream servers. All requests with that hash are sent to that server, thus establishing session persistence. + +If the client has an IPv6 address, the hash is based on the entire address. If it has an IPv4 address, the hash is based on just the first three octets of the address. This is designed to optimize for ISP clients that are assigned IP addresses dynamically from a subnetwork (/24) range. However, it is not effective in these cases: + +- The majority of the traffic to your site is coming from one forward proxy or from clients on the same /24 network, because in that case IP Hash maps all clients to the same server. + +- A client's IP address can change during the session, for example when a mobile client switches from a WiFi network to a cellular one. 
+ +To configure session persistence in NGINX, add the `ip_hash` directive to the `upstream` block created in [Configuring Basic Load Balancing](#load-balancing-basic): + +```nginx +# In the 'http' block +upstream weblogic { + ip_hash; + server 192.168.25.33:7001; + server 192.168.25.69:7001; +} +``` + +Directive documentation: [ip_hash](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ip_hash), [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + +You can also use the Hash load‑balancing method for session persistence, with the hash based on any combination of text and [NGINX variables](https://nginx.org/en/docs/varindex.html) you specify. For example, the following configuration hashes on full (four‑octet) client IP addresses. + +```nginx +# In the 'http' block +upstream weblogic { + hash $remote_addr; + server 192.168.25.33:7001; + server 192.168.25.69:7001; +} +``` + +Directive documentation: [hash](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#hash), [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + + +### Configuring Proxy of WebSocket Traffic + +WebLogic Server supports the WebSocket protocol ([RFC 6455](https://tools.ietf.org/html/rfc6455)), which provides simultaneous two‑way communication over a single TCP connection between clients and servers, where each side can send data independently from the other. To initiate the WebSocket connection, the client sends a handshake request to the server, upgrading the request from standard HTTP to WebSocket. The connection is established if the handshake request passes validation, and the server accepts the request. When a WebSocket connection is created, a browser client can send data to a WebLogic Server instance while simultaneously receiving data from that instance. 
+ +To learn more about how WebLogic Server handles WebSocket traffic, see [Using WebSockets in WebLogic Server](https://docs.oracle.com/middleware/1212/wls/WLPRG/websockets.htm). + +NGINX Open Source and NGINX Plus by default use HTTP/1.0 for upstream connections. To be proxied correctly, WebSocket connections require HTTP/1.1 along with some other configuration directives that set HTTP headers: + +```nginx +# In the 'http' block +map $http_upgrade $connection_upgrade { + default upgrade; + '' close; +} + +# In the 'server' block for HTTPS traffic +location /wstunnel/ { + proxy_pass http://weblogic; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; +} +``` + +Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [map](https://nginx.org/en/docs/http/ngx_http_map_module.html#map), [proxy_http_version](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) + +The first `proxy_set_header` directive is needed because the `Upgrade` request header is hop-by-hop; that is, the HTTP specification explicitly forbids proxies from forwarding it. This directive overrides the prohibition. + +The second `proxy_set_header` directive sets the `Connection` header to a value that depends on the test in the `map` block: if the request has an `Upgrade` header, the `Connection` header is set to `upgrade`; otherwise, it is set to `close`. + +For more information about proxying WebSocket traffic, see [WebSocket proxying](https://nginx.org/en/docs/http/websocket.html) and [NGINX as a WebSocket Proxy](https://www.nginx.com/blog/websocket-nginx/). + + +### Configuring Content Caching + +Caching assets at the edge of your infrastructure can have many benefits. 
Even caching a small percent of the requests to your clients for a short period of time can drastically improve the performance of your web applications. + +One choice for caching is [Oracle Web Cache](https://www.oracle.com/middleware/technologies/webtier.html#WebCache), a reverse proxy cache and compression engine that can be deployed between the client and server. + +Another alternative is the scalable disk‑based cache in NGINX Open Source and NGINX Plus, which integrates with their reverse proxy capability. There are a variety of useful directives that can be used to finetune caching behavior; for a detailed discussion, see [A Guide to Caching with NGINX and NGINX Plus](https://www.nginx.com/blog/nginx-caching-guide/). + +To create a very simple caching configuration: + +1. Include the `proxy_cache_path` directive to create the local disk directory **/tmp/NGINX_cache/** for use as a cache. The `keys_zone` parameter allocates 10 megabytes (MB) of shared memory for a zone called **backcache**, which is used to store cache keys and metadata such as usage timers. A 1‑MB zone can store data for about 8,000 keys. + + ```nginx + # In the 'http' block + proxy_cache_path /tmp/NGINX_cache/ keys_zone=backcache:10m; + ``` + + Directive documentation: [proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) + +2. In the `location` block that matches HTTPS requests in which the path starts with /weblogic-app/, include the `proxy_cache` directive to reference the cache created in the previous step. 
+ + ```nginx + # In the 'server' block for HTTPS traffic + location /weblogic-app/ { + proxy_pass http://weblogic; + proxy_cache backcache; + } + ``` + + Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_cache](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) + +By default, the cache key is similar to this string of [NGINX variables](https://nginx.org/en/docs/varindex.html): `$scheme$proxy_host$request_uri`. To change the list of variables, specify them with the `proxy_cache_key` directive. One effective use of this directive is to create a cache key for each user based on the `JSESSIONID` cookie. This is useful when the cache is private, for example containing shopping cart data or other user‑specific resources. Include the `JSESSIONID` cookie in the cache key with this directive: + +```nginx +proxy_cache_key $proxy_host$request_uri$cookie_jsessionid; +``` + +Directive documentation: [proxy_cache_key](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_key) + +For more complete information on caching, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}) and the reference documentation for the HTTP [Proxy](https://nginx.org/en/docs/http/ngx_http_proxy_module.html) module. + + +### Configuring HTTP/2 Support + +HTTP/2 is fully supported in both NGINX 1.9.5 and later, and NGINX Plus R7 and later. As always, we recommend you run the latest version of software to take advantage of improvements and bug fixes. + +- If using NGINX Open Source, note that in version 1.9.5 and later the SPDY module is completely removed from the codebase and replaced with the [HTTP/2](https://nginx.org/en/docs/http/ngx_http_v2_module.html) module. After upgrading to version 1.9.5 or later, you can no longer configure NGINX Open Source to use SPDY.
If you want to keep using SPDY, you need to compile NGINX Open Source from the sources in the [NGINX 1.8.x branch](https://nginx.org/en/download.html). + +- If using NGINX Plus, in R11 and later the nginx-plus package supports HTTP/2 by default, and the nginx-plus-extras package available in previous releases is deprecated by separate [dynamic modules](https://www.nginx.com/products/nginx/modules/) authored by NGINX. + + In NGINX Plus R8 through R10, the nginx-plus and nginx-plus-extras packages support HTTP/2 by default. + + In NGINX Plus R8 and later, NGINX Plus supports HTTP/2 by default, and does not support SPDY. + + If using NGINX Plus R7, you must install the nginx-plus-http2 package instead of the nginx-plus or nginx-plus-extras package. + +To enable HTTP/2 support, add the `http2` directive in the `server` block for HTTPS traffic that we created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers), so that it looks like this: + +```nginx +# In the 'server' block for HTTPS traffic +listen 443 ssl; +http2 on; +``` + +Directive documentation: [http2](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2) + +To verify that HTTP/2 translation is working, you can use the "HTTP/2 and SPDY indicator" plug‑in available for [Google Chrome](https://chrome.google.com/webstore/detail/http2-and-spdy-indicator/mpbpobfflnpcgagjijhmgnchggcjblin?hl=en) and [Firefox](https://addons.mozilla.org/en-US/firefox/addon/http2-indicator/). + + +### Full Configuration for Basic Load Balancing + +The full configuration for basic load balancing appears here for your convenience. It goes in the `http` context. The complete file is available for [download](https://www.nginx.com/resource/conf/weblogic-basic.conf) from the NGINX website. 
+ +We recommend that you do not copy text directly from this document, but instead use the method described in [Creating and Modifying Configuration Files](#config-files) to include these directives in your configuration – add an `include` directive to the `http` context of the main **nginx.conf** file to read in the contents of /etc/nginx/conf.d/weblogic-basic.conf. + +```nginx +proxy_cache_path /tmp/NGINX_cache/ keys_zone=backcache:10m; + +# WebSocket configuration +map $http_upgrade $connection_upgrade { + default upgrade; + '' close; +} + +upstream weblogic { + # Use IP Hash for session persistence + ip_hash; + + # List of WebLogic Server application servers + server 192.168.25.33:7001; + server 192.168.25.69:7001; +} + +server { + listen 80; + server_name example.com; + + # Redirect all HTTP requests to HTTPS + location / { + return 301 https://$server_name$request_uri; + } +} + +server { + listen 443 ssl; + http2 on; + + server_name example.com; + + ssl_certificate /etc/nginx/ssl/certificate-name; + ssl_certificate_key /etc/nginx/ssl/private-key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + + # Load balance requests for '/weblogic-app/' across WebLogic Server + # application servers + location /weblogic-app/ { + proxy_pass http://weblogic; + proxy_cache backcache; + } + + # Return a temporary redirect to '/weblogic-app/' when user requests '/' + location = / { + return 302 /weblogic-app/; + } + + # WebSocket configuration + location /wstunnel/ { + proxy_pass http://weblogic; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + } +} +``` + + +## Configuring Enhanced Load Balancing with NGINX Plus + +This section explains how to configure enhanced load balancing with some of the extended features in NGINX Plus.
+ +**Note:** Before setting up the enhanced features described in this section, you must complete the instructions for basic load balancing in these two sections: + +- [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers) +- [Configuring Basic Load Balancing](#load-balancing-basic) + +Except as noted, all optional basic features (described in the other subsections of [Configuring Basic Load Balancing with NGINX Open Source or NGINX Plus](#basic)) can be combined with the enhanced features described here. + +The features described in the following sections are all optional. + +- [Configuring Advanced Session Persistence](#session-persistence-advanced) +- [Configuring Application Health Checks](#health-checks) +- [Configuring Content Cache Purging](#cache-purging) +- [Enabling Live Activity Monitoring](#live-activity-monitoring) +- [Enabling Dynamic Reconfiguration of Upstream Groups](#reconfiguration) + +The complete configuration file appears in [Full Configuration for Enhanced Load Balancing](#full-configuration-enhanced). + + +### Configuring Advanced Session Persistence + +NGINX Plus has more sophisticated session persistence methods available than NGINX Open Source, implemented in three variants of the `sticky` directive. In the following example, we add the `sticky learn` directive to the upstream group we created in [Configuring Basic Load Balancing](#load-balancing-basic). + +1. Remove or comment out the `ip_hash` directive, leaving only the `server` directives: + + ```nginx + # In the 'http' block + upstream weblogic { + #ip_hash; + server 192.168.25.33:7001; + server 192.168.25.69:7001; + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + +2.
Configure session persistence with this `sticky learn` directive, which refers to the `JSESSIONID` cookie created by your Oracle WebLogic Server application as the session identifier. + + - The `create` and `lookup` parameters to `sticky learn` specify how new sessions are created and existing sessions are searched for, respectively. For new sessions, NGINX Plus sets the session identifier to the value of the `$upstream_cookie_JSESSIONID` variable, which captures the `JSESSIONID` cookie sent by the WebLogic Server application server. When checking for existing sessions, it uses the `JSESSIONID` cookie sent by the client (the `$cookie_JSESSIONID` variable) as the session identifier. + + Both parameters can be specified more than once (each time with a different variable), in which case NGINX Plus uses the first non‑empty variable for each one. + + - The `zone` argument creates a shared memory zone for storing information about sessions. The amount of memory allocated – here, 1 MB – determines how many sessions can be stored at a time (the number varies by platform). The name assigned to the zone – here, `client_sessions` – must be unique for each `sticky` directive. + + ```nginx + # In the 'http' block + upstream weblogic { + server 192.168.25.33:7001; + server 192.168.25.69:7001; + sticky learn create=$upstream_cookie_JSESSIONID + lookup=$cookie_JSESSIONID + zone=client_sessions:1m; + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [`sticky learn`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + +For more information on session persistence, see the NGINX Plus Admin Guide. + + +### Configuring Application Health Checks + +Health checks are out‑of‑band HTTP requests sent to a server at fixed intervals. 
They are used to determine whether a server is responsive and functioning correctly, without requiring an actual request from a client. + +Because the `health_check` directive is placed in the `location` block, we can enable different health checks for each application. + +1. In the `location` block that matches HTTPS requests in which the path starts with /weblogic-app/ (created in [Configuring Basic Load Balancing](#load-balancing-basic)), add the `health_check` directive. + + Here we configure NGINX Plus to send an out‑of‑band request for the URI **/benefits** to each of the servers in the **weblogic** upstream group every 5 seconds (the default frequency). If a server does not respond correctly, it is marked down and NGINX Plus stops sending requests to it until it passes a subsequent health check. We include the `match` parameter to the `health_check` directive to define a nondefault set of health‑check tests. + + ```nginx + # In the 'server' block for HTTPS traffic + location /weblogic-app/ { + proxy_pass http://weblogic; + proxy_cache backcache; + health_check uri=/benefits match=health_check; + } + ``` + + Directive documentation: [health_check](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check), [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_cache](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) + +2. In the `http` context, include a `match` directive to define the tests that a server must pass to be considered functional. In this example, it must return status code `200`, the `Content-Type` response header must be `text/html`, and the response body must match the indicated regular expression. 
+ + ```nginx + # In the 'http' block + match health_check { + status 200; + header Content-Type = text/html; + body ~ "Welcome To Dizzyworld"; + } + ``` + + Directive documentation: [match](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#match) + +3. In the **weblogic** upstream group, include the `zone` directive to define a shared memory zone that stores the group's configuration and run‑time state, which are shared among worker processes. + + ```nginx + # In the 'http' block + upstream weblogic { + zone weblogic 64k; + server 192.168.25.33:7001; + server 192.168.25.69:7001; + # ... + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +NGINX Plus also has a slow‑start feature that is a useful auxiliary to health checks. When a failed server recovers, or a new server is added to the upstream group, NGINX Plus slowly ramps up the traffic to it over a defined period of time. This gives the server time to "warm up" without being overwhelmed by more connections than it can handle as it starts up. For more information, see the NGINX Plus Admin Guide. + +For example, to set a slow‑start period of 30 seconds for your WebLogic Server application servers, include the `slow_start` parameter to their `server` directives: + +```nginx +# In the 'upstream' block +server 192.168.25.33:7001 slow_start=30s; +server 192.168.25.69:7001 slow_start=30s; +``` + +Parameter documentation: [slow_start](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#slow_start) + +For information about customizing health checks, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/load-balancer/http-health-check.md" >}}). + + +### Configuring Content Cache Purging + +NGINX Plus has an API for managing the cache. 
You can, for example, purge items with a particular key from the cache when you know they are stale, even if they're not actually expired. This can be configured using the `proxy_cache_purge` directive. The following sample configuration combines the `proxy_cache_purge` and `map` directives to enable use of the HTTP `PURGE` method to delete cached content with a specified key: + +```nginx +# In the 'http' block +proxy_cache_path /tmp/NGINX_cache/ keys_zone=backcache:10m; + +map $request_method $purge_method { + PURGE 1; + default 0; +} + +# In the 'server' block for HTTPS traffic +location /weblogic-app/ { + proxy_cache backcache; + proxy_pass http://weblogic; + proxy_cache_purge $purge_method; +} +``` + +Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [map](https://nginx.org/en/docs/http/ngx_http_map_module.html#map), [proxy_cache](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache), [proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path), [proxy_cache_purge](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_purge), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) + +With this configuration in place, from any HTTP client you can clear the cache for a particular key. The following sample `curl` command shows how to use the API to send an HTTP `PURGE` request to the NGINX Open Source server that hosts **"www.example.com"**. The example further illustrates how you can purge an entire set of resources that have a common URL stem, by appending the asterisk (*) wildcard to the URL. 
+ +```shell +curl -X PURGE -D - "http://www.example.com/*" +HTTP/1.1 204 No Content +Server: nginx/1.5.12 +Date: Sat, 03 May 2014 16:33:04 GMT +Connection: keep-alive +``` + +For more complete information about NGINX Open Source and NGINX Plus' caching capabilities, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}). + + +### Enabling Live Activity Monitoring + +NGINX Plus includes a live activity monitoring interface that provides key load and performance metrics in real time, including TCP metrics in NGINX Plus R6 and later. Statistics are reported through a RESTful JSON interface, making it very easy to feed the data to a custom or third‑party monitoring tool. There is also a built‑in dashboard. Follow these instructions to deploy it. + +Dashboard tab in NGINX Plus live activity monitoring dashboard + +For more information about live activity monitoring, see the [NGINX Plus Admin Guide.]({{< relref "../../admin-guide/monitoring/live-activity-monitoring.md" >}}) + +The quickest way to configure live activity monitoring and the built‑in dashboard is to download the sample configuration file from the NGINX website and modify it as necessary. For more complete instructions, see [Live Activity Monitoring of NGINX Plus in 3 Simple Steps](https://www.nginx.com/blog/live-activity-monitoring-nginx-plus-3-simple-steps/) on our blog. + +1. Download the **status.conf** file to the NGINX Plus server: + + ```none + # cd /etc/nginx/conf.d + # curl https://www.nginx.com/resource/conf/status.conf > status.conf + ``` + +2. Verify that the main configuration file (**/etc/nginx/nginx.conf**) has an `include` directive that reads the file into the `http` context. The following is specific to **status.conf**, but a wildcard version also works: + + ```nginx + # In the 'http' block in nginx.conf + include conf.d/status.conf; + ``` + + Directive documentation: [include](https://nginx.org/en/docs/ngx_core_module.html#include) + +3. 
Customize the file for your deployment as specified by comments in the file. In particular, the default settings in the file allow anyone on any network to access the dashboard. We strongly recommend that you restrict access to the dashboard with one or more of the following methods: + + - **IP address‑based access control lists (ACLs)**. In the sample configuration file, uncomment the `allow` and `deny` directives, and substitute the address of your administrative network for 10.0.0.0/8. Only users on the specified network can access the status page. + + ```nginx + allow 10.0.0.0/8; + deny all; + ``` + + Directive documentation: [allow and deny](https://nginx.org/en/docs/http/ngx_http_access_module.html) + + - **HTTP Basic authentication**. In the sample configuration file, uncomment the `auth_basic` and `auth_basic_user_file` directives and add user entries to the **/etc/nginx/users** file (for example, by using an [htpasswd generator](https://httpd.apache.org/docs/2.4/programs/htpasswd.html)). If you have an Apache installation, another option is to reuse an existing **htpasswd** file. + + ```nginx + auth_basic on; + auth_basic_user_file /etc/nginx/users; + ``` + + Directive documentation: [auth_basic and auth_basic_user_file](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html) + + - **Client certificates**, which are part of a complete configuration of SSL/TLS. For more information, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/security-controls/terminating-ssl-http.md" >}}) and the reference documentation for the HTTP [SSL/TLS](https://nginx.org/en/docs/http/ngx_http_ssl_module.html) module. + + - **Firewall**. Configure your firewall to disallow outside access to the port for the dashboard (8080 in the sample configuration file). + +3. 
In each upstream group that you want to monitor, include the `zone` directive to define a shared memory zone that stores the group's configuration and run‑time state, which are shared among worker processes. + + For example, to monitor your WebLogic Server application servers, add the `zone` directive to the **weblogic** upstream group (if you followed the instructions in [Configuring Application Health Checks](#health-checks), you already made this change). + + ```nginx + # In the 'http' block + upstream weblogic { + zone weblogic 64k; + server 192.168.25.33:7001; + server 192.168.25.69:7001; + # ... + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +4. In the `server` block for HTTPS traffic (created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers)), add the `status_zone` directive: + + ```nginx + # In the 'server' block for HTTPS traffic + status_zone weblogic-ssl; + ``` + + Directive documentation: [status_zone](https://nginx.org/en/docs/http/ngx_http_status_module.html#status_zone) + +When you reload the NGINX Plus configuration file, for example by running the `nginx -s reload` command, the NGINX Plus dashboard is available immediately at http://_nginx-plus-server-address_:8080. + + +### Enabling Dynamic Reconfiguration of Upstream Groups + +With NGINX Plus, you can reconfigure load‑balanced server groups (both HTTP and TCP/UDP) dynamically using either DNS or the NGINX Plus API introduced in NGINX Plus R13. See the NGINX Plus Admin Guide for a more detailed discussion of the DNS and [API]({{< relref "../../admin-guide/load-balancer/dynamic-configuration-api.md" >}}) methods. + +To enable dynamic reconfiguration of your upstream group of WebLogic Server app servers using the NGINX Plus API: + +1. 
Include the `zone` directive in the **weblogic** upstream group to create a shared memory zone that stores the group's configuration and run‑time state, which are shared among worker processes. (If you followed the instructions in [Configuring Application Health Checks](#health-checks) or [Enabling Live Activity Monitoring](#live-activity-monitoring), you already made this change.) + + ```nginx + # In the 'http' block + upstream weblogic { + zone weblogic 64k; + server 192.168.25.33:7001; + server 192.168.25.69:7001; + # ... + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +2. In the `server` block for HTTPS traffic (created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers)), add a new `location` block for the NGINX Plus API, containing the `api` directive (**/api** is also the conventional name for the location, as used here). + + (If you configured [live activity monitoring](#live-activity-monitoring) by downloading the **status.conf** file, it already includes this block.) + + We strongly recommend that you restrict access to the location so that only authorized administrators can access the NGINX Plus API. The `allow` and `deny` directives in the following example permit access only from the localhost address (127.0.0.1). + + ```nginx + # In the 'server' block for HTTPS traffic + location /api { + api write=on; + allow 127.0.0.1; + deny all; + } + ``` + + Directive documentation: [allow and deny](https://nginx.org/en/docs/http/ngx_http_access_module.html), [api](https://nginx.org/en/docs/http/ngx_http_api_module.html#api) + + +### Full Configuration for Enhanced Load Balancing + +The full configuration for enhanced load balancing appears here for your convenience. It goes in the `http` context. 
The complete file is available for [download](https://www.nginx.com/resource/conf/weblogic-enhanced.conf) from the NGINX website. + +We recommend that you do not copy text directly from this document, but instead use the method described in [Creating and Modifying Configuration Files](#config-files) to include these directives in your configuration – add an `include` directive to the `http` context of the main **nginx.conf** file to read in the contents of /etc/nginx/conf.d/weblogic-enhanced.conf. + +```nginx +proxy_cache_path /tmp/NGINX_cache/ keys_zone=backcache:10m; + +# WebSocket configuration +map $http_upgrade $connection_upgrade { + default upgrade; + '' close; +} + +# Map the PURGE method to the request method, for cache purging +map $request_method $purge_method { + PURGE 1; + default 0; +} + +# Application health checks +match health_check { + status 200; + header Content-Type = text/html; + body ~ "Welcome To Dizzyworld"; +} + +upstream weblogic { + # Shared memory zone for application health checks, live activity + # monitoring, and dynamic reconfiguration + zone weblogic 64k; + + # List of WebLogic Server application servers + server 192.168.25.33:7001 slow_start=30s; + server 192.168.25.69:7001 slow_start=30s; + + # Session persistence based on JSESSIONID + sticky learn create=$upstream_cookie_JSESSIONID + lookup=$cookie_JSESSIONID + zone=client_sessions:1m; +} + +server { + listen 80; + server_name example.com; + + # Required for live activity monitoring of HTTP traffic + status_zone weblogic; + + # Redirect all HTTP requests to HTTPS + location / { + return 301 https://$server_name$request_uri; + } +} + +server { + listen 443 ssl; + http2 on; + + server_name example.com; + + # Required for live activity monitoring of HTTPS traffic + status_zone weblogic-ssl; + ssl_certificate /etc/nginx/ssl/certificate-name; + ssl_certificate_key /etc/nginx/ssl/private-key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + + # Load balance 
requests for '/weblogic-app/' across WebLogic Server + # application servers + location /weblogic-app/ { + proxy_pass http://weblogic; + proxy_cache backcache; + + # Enable purging of the content cache + proxy_cache_purge $purge_method; + + # Active health checks + health_check uri=/benefits match=health_check; + } + + # Return a 302 redirect to '/weblogic-app/' when user requests '/' + location = / { + return 302 /weblogic-app/; + } + + # WebSocket configuration + location /wstunnel/ { + proxy_pass http://weblogic; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + } + + # Secured access to the NGINX Plus API + location /api { + api write=on; + allow 127.0.0.1; # Permit access from localhost + deny all; # Deny access from everywhere else + } +} + +``` + + +## Resources + +- [NGINX Plus Overview](https://www.nginx.com/products/nginx) +- [NGINX Plus Admin Guide]({{< relref "/nginx/admin-guide/_index.md" >}}) +- [NGINX Wiki](https://www.nginx.com/resources/wiki/) + +### Revision History + +- Version 5 (May 2024) – Update about HTTP/2 support (the [http2](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2) directive) +- Version 4 (April 2018) – Update about the NGINX Plus API (NGINX Plus R13, NGINX Open Source 1.13.4) +- Version 3 (April 2017) – Update about HTTP/2 support (NGINX Plus R11 and later) +- Version 2 (January 2016) – Update about HTTP/2 support (NGINX Plus R8, NGINX 1.9.9) +- Version 1 (December 2015) – Initial version (NGINX Plus R7, NGINX 1.9.5) diff --git a/content/nginx/deployment-guides/load-balance-third-party/wildfly.md b/content/nginx/deployment-guides/load-balance-third-party/wildfly.md new file mode 100644 index 000000000..6d5e13c38 --- /dev/null +++ b/content/nginx/deployment-guides/load-balance-third-party/wildfly.md @@ -0,0 +1,899 @@ +--- +description: Load balance Wildfly (JBoss) application servers with NGINX Open Source + or the advanced features in F5 NGINX 
Plus, following our step-by-step setup instructions. +docs: DOCS-456 +doctypes: +- task +title: Load Balancing Wildfly and JBoss Application Servers with NGINX Open Source + and NGINX Plus +toc: true +weight: 100 +--- + +This deployment guide explains how to use NGINX Open Source and F5 NGINX Plus to load balance HTTP and HTTPS traffic across a pool of Wildfly (JBoss) application servers. It provides complete instructions for configuring NGINX Open Source or NGINX Plus as required. + + + +## About NGINX Open Source and NGINX Plus + +[NGINX Open Source](https://nginx.org/en) is an open source web server and reverse proxy that has grown in popularity in recent years because of its scalability, outstanding performance, and small footprint. NGINX Open Source was first created to solve the C10K problem (serving 10,000 simultaneous connections on a single web server). NGINX Open Source's features and performance have made it a staple of high‑performance sites – it's [the #1 web server at the 100,000 busiest websites in the world](https://w3techs.com/technologies/cross/web_server/ranking). + +[NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) is the commercially supported version of NGINX Open Source. 
NGINX Plus is a complete application delivery platform, extending the power of NGINX Open Source with a host of enterprise-ready capabilities that enhance a JBoss application server deployment and are instrumental to building web applications at scale: + +- [Full‑featured HTTP, TCP, and UDP load balancing](https://www.nginx.com/products/nginx/load-balancing/) +- [Intelligent session persistence](https://www.nginx.com/products/nginx/load-balancing/#session-persistence) +- [High‑performance reverse proxy]({{< relref "../../admin-guide/web-server/reverse-proxy.md" >}}) +- [Caching and offload of dynamic and static content]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}) +- [Adaptive streaming to deliver audio and video to any device](https://www.nginx.com/products/nginx/streaming-media/) +- [Application-aware health checks](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/) and [high availability](https://docs.nginx.com/nginx/admin-guide/high-availability/) +- [Advanced activity monitoring available via a dashboard or API](https://www.nginx.com/products/nginx/live-activity-monitoring/) +- [Management and real‑time configuration changes with DevOps‑friendly tools](https://www.nginx.com/products/nginx/load-balancing/#load-balancing-api) + + +## About Wildfly and JBoss + +[Wildfly](https://www.wildfly.org/) is an application server that before 2013 was called the _JBoss Application Server_ or simply _JBoss_. It implements the Java Enterprise Edition 7 Platform (Java EE, formerly known as J2EE) for developing and deploying enterprise Java applications, portals, and web applications and services. Java EE allows the use of standardized modular components and enables the Java platform to handle many aspects of programming automatically. As free and open source software, WildFly is distributed under the GNU Lesser General Public License (LGPL), version 2.1. This guide was developed and tested with Wildfly 9. 
+ +The commercially supported version of the Wildfly software is the [Red Hat JBoss Enterprise Application Platform](https://developers.redhat.com/products/eap/overview), and this guide applies to it and other commercial JBoss application servers as well. + + +## Prerequisites and System Requirements + +- A Wildfly or JBoss application server installed and configured on a physical or virtual system. +- A Linux system to host NGINX Open Source or NGINX Plus. To avoid potential conflicts with other applications, we recommend you install NGINX Plus on a fresh physical or virtual system. For the list of Linux distributions supported by NGINX Plus, see [NGINX Plus Technical Specifications]({{< relref "../../technical-specs.md" >}}). +- NGINX Open Source 1.9.5 or later, or NGINX Plus R7 or later. + +The instructions assume you have basic Linux system administration skills, including the following. Full instructions are not provided for these tasks. + +- Configuring and deploying a Wildfly application +- Installing Linux software from vendor‑supplied packages +- Editing configuration files +- Copying files between a central administrative system and Linux servers +- Running basic commands to start and stop services +- Reading log files + +### About Sample Values and Copying of Text + +- `example.com` is used as a sample organization name (in key names and configuration blocks). Replace it with your organization's name. +- Many NGINX Open Source and NGINX Plus configuration blocks in this guide list two sample Wildfly application servers with IP addresses 192.168.33.11 and 192.168.33.12. Replace these addresses with the IP addresses of your Wildfly servers. Include a line in the configuration block for each server if you have more or fewer than two. +- For readability reasons, some commands appear on multiple lines. 
If you want to copy and paste them into a terminal window, we recommend that you first copy them into a text editor, where you can substitute the object names that are appropriate for your deployment and remove any extraneous formatting characters that your browser might insert. +- We recommend that you do not copy text from the configuration snippets in this guide into your configuration files. For the recommended way to create configuration files, see [Creating and Modifying Configuration Files](#config-files). + + +## Configuring an SSL/TLS Certificate for Client Traffic + +If you plan to enable SSL/TLS encryption of traffic between NGINX Open Source or NGINX Plus and clients of your Wildfly application, you need to configure a server certificate for NGINX Open Source or NGINX Plus. + +- SSL/TLS support is enabled by default in all [NGINX Plus packages](https://cs.nginx.com/) and [NGINX Open Source binaries](https://nginx.org/en/linux_packages.html) provided by NGINX. +- If you are compiling NGINX Open Source from source, include the `--with-http_ssl_module` parameter to enable SSL/TLS support for HTTP traffic (the corresponding parameter for TCP is `--with-stream_ssl_module`, and for email is `--with-mail_ssl_module`, but this guide does not cover either of those protocol types). +- If using binaries from another provider, consult the provider documentation to determine if it supports SSL/TLS. + +There are several ways to obtain a server certificate, including the following. For your convenience, step-by-step instructions are provided for the second and third options. + +- If you already have an SSL/TLS certificate for NGINX Open Source or NGINX Plus installed on another UNIX or Linux system (including systems running Apache HTTP Server), copy it to the **/etc/nginx/ssl** directory on the NGINX Open Source or NGINX Plus server. +- Generate a self‑signed certificate as described in [Generating a Self‑Signed Certificate](#certificate-self-signed) below. 
This is sufficient for testing scenarios, but clients of production deployments generally require a certificate signed by a certificate authority (CA). +- Request a new certificate from a CA or your organization's security group, as described in [Generating a Certificate Request](#certificate-request) below. + +For more details on SSL/TLS termination, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/security-controls/terminating-ssl-http.md" >}}). + + +### Generating a Self-Signed Certificate + +Generate a public‑private key pair and a self‑signed server certificate in PEM format that is based on them. + +1. Log in as the root user on a machine that has the `openssl` software installed. + +2. Generate the key pair in PEM format (the default). To encrypt the private key, include the `-des3` parameter. (Other encryption algorithms are available, listed on the man page for the [genrsa](https://www.openssl.org/docs/manmaster/man1/openssl-genrsa.html) command.) You are prompted for the passphrase used as the basis for encryption. + + ```shell + root# openssl genrsa -des3 -out ~/private-key.pem 2048 + Generating RSA private key + Enter pass phrase for private-key.pem: + ``` + +3. Create a backup of the key file in a secure location. If you lose the key, the certificate becomes unusable. + + ```shell + root# cp ~/private-key.pem /private-key.pem.backup + ``` + +4. Generate the certificate. Include the `-new` and `-x509` parameters to make a new self‑signed certificate. Optionally include the `-days` parameter to change the key's validity lifetime from the default of 30 days (10950 days is about 30 years). Respond to the prompts with values appropriate for your testing deployment. + + ```shell + root# openssl req -new -x509 -key ~/private-key.pem -out ~/self-cert.pem -days 10950 + ``` + +5. Copy or move the certificate file and associated key files to the **/etc/nginx/ssl** directory on the NGINX Open Source or NGINX Plus server. 
+ + +### Generating a Certificate Request + +1. Log in as the root user on a machine that has the `openssl` software installed. + +2. Create a private key to be packaged in the certificate. + + ```shell + root# openssl genrsa -out ~/example.com.key 2048 + ``` + +3. Create a backup of the key file in a secure location. If you lose the key, the certificate becomes unusable. + + ```shell + root# cp ~/example.com.key secure-dir/example.com.key.backup + ``` + +4. Create a Certificate Signing Request (CSR) file. + + ```shell + root# openssl req -new -sha256 -key ~/example.com.key -out ~/example.com.csr + ``` + +5. Request a certificate from a CA or your internal security group, providing the CSR file (**example.com.csr**). As a reminder, never share private keys (**.key** files) directly with third parties. + + The certificate needs to be PEM format rather than in the Windows-compatible PFX format. If you request the certificate from a CA website yourself, choose NGINX or Apache (if available) when asked to select the server platform for which to generate the certificate. + +6. Copy or move the certificate file and associated key files to the **/etc/nginx/ssl** directory on the NGINX Plus server. + + +## Creating and Modifying Configuration Files + +To reduce errors, this guide has you copy directives from files provided by NGINX into your configuration files, instead of using a text editor to type in the directives yourself. Then you go through the sections in this guide (starting with [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers)) to learn how to modify the directives as required for your deployment. + +As provided, there is one file for basic load balancing (with NGINX Open Source or NGINX Plus) and one file for enhanced load balancing (with NGINX Plus). 
If you are installing and configuring NGINX Open Source or NGINX Plus on a fresh Linux system and using it only to load balance Wildfly traffic, you can use the provided file as your main configuration file, which by convention is called **/etc/nginx/nginx.conf**. + +We recommend, however, that instead of a single configuration file you use the scheme that is set up automatically when you install an NGINX Plus package, especially if you already have an existing NGINX Open Source or NGINX Plus deployment or plan to expand your use of NGINX Open Source or NGINX Plus to other purposes in future. In the conventional scheme, the main configuration file is still called **/etc/nginx/nginx.conf**, but instead of including all directives in it, you create separate configuration files for different HTTP‑related functions and store the files in the **/etc/nginx/conf.d** directory. You then use the `include` directive in the `http` context of the main file to read in the contents of the function-specific files. + +To download the complete configuration file for basic load balancing: + +```none +root# cd /etc/nginx/conf.d +root# curl https://www.nginx.com/resource/conf/jboss-basic.conf > jboss-basic.conf +``` + +To download the complete configuration file for enhanced load balancing: + +```none +root# cd /etc/nginx/conf.d +root# curl https://www.nginx.com/resource/conf/jboss-enhanced.conf > jboss-enhanced.conf +``` + +(You can also access the URL in a browser and download the file that way.) + +To set up the conventional configuration scheme, add an `http` configuration block in the main **nginx.conf** file, if it does not already exist. (The standard placement is below any global directives.) 
Add this `include` directive with the appropriate filename: + +```nginx +http { + include conf.d/jboss-(basic|enhanced).conf; +} +``` + +Directive documentation: [include](https://nginx.org/en/docs/ngx_core_module.html#include) + +You can also use wildcard notation to reference all files that pertain to a certain function or traffic type in the appropriate context block. For example, if you name all HTTP configuration files _function_-http.conf, this is an appropriate include directive: + +```nginx +http { + include conf.d/*-http.conf; +} +``` + +For reference purposes, the text of the full configuration files is included in this document: + +- [Full Configuration for Basic Load Balancing](#full-configuration-basic) +- [Full Configuration for Enhanced Load Balancing](#full-configuration-enhanced) + +We recommend, however, that you do not copy text directly from this document. It does not necessarily use the same mechanisms for positioning text (such as line breaks and white space) as text editors do. In text copied into an editor, lines might run together and indenting of child statements in configuration blocks might be missing or inconsistent. The absence of formatting does not present a problem for NGINX Open Source or NGINX Plus, because (like many compilers) they ignore white space during parsing, relying solely on semicolons and curly braces as delimiters. The absence of white space does, however, make it more difficult for humans to interpret the configuration and modify it without making mistakes. + +### About Reloading Updated Configuration + +We recommend that each time you complete a set of updates to the configuration, you run the `nginx -t` command to test the configuration file for syntactic validity. 
+ +```none +root# nginx -t +nginx: the configuration file /etc/nginx/nginx.conf syntax is ok +nginx: configuration file /etc/nginx/nginx.conf test is successful +``` + +To tell NGINX Open Source or NGINX Plus to start using the new configuration, run one of the following commands: + +```none +root# nginx -s reload +``` + +or + +```none +root# service nginx reload +``` + + +## Configuring Basic Load Balancing with NGINX Open Source or NGINX Plus + +This section explains how to set up NGINX Open Source or NGINX Plus as a load balancer in front of two Wildfly servers. The instructions in the first two sections are mandatory: + +- [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers) +- [Configuring Basic Load Balancing](#load-balancing-basic) + +The instructions in the remaining sections are optional, depending on the requirements of your application: + +- [Configuring Basic Session Persistence](#session-persistence-basic) +- [Configuring Proxy of WebSocket Traffic](#websocket) +- [Configuring Content Caching](#caching) +- [Configuring HTTP/2 Support](#http2) + +The complete configuration file appears in [Full Configuration for Basic Load Balancing](#full-configuration-basic). + +If you are using NGINX Plus, you can configure additional enhanced features after you complete the configuration of basic load balancing. See [Configuring Enhanced Load Balancing with NGINX Plus](#enhanced). + + +### Configuring Virtual Servers for HTTP and HTTPS Traffic + +These directives define virtual servers for HTTP and HTTPS traffic in separate `server` blocks in the top-level `http` configuration block. All HTTP requests are redirected to the HTTPS server. + +1. Configure a `server` block that listens for requests for **"https://example.com"** received on port 443. 
+ + The `ssl_certificate` and `ssl_certificate_key` directives are required; substitute the names of the certificate and private key you chose in [Configuring an SSL/TLS Certificate for Client Traffic](#tls-certificate). + + The other directives are optional but recommended. + + ```nginx + # In the 'http' block + server { + listen 443 ssl; + server_name example.com; + + ssl_certificate /etc/nginx/ssl/example.com.crt; + ssl_certificate_key /etc/nginx/ssl/example.com.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + } + ``` + + Directive documentation: [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen), [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server), [server_name](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_name), [ssl_certificate](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate), [ssl_certificate_key](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate_key), [ssl_prefer_server_ciphers](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_prefer_server_ciphers), [ssl_session_cache](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache) + +2. Configure a `server` block that permanently redirects requests for **"http://example.com"** that are received on port 80 to the HTTPS server, which is defined in the previous step. + + If you're not using SSL/TLS for client connections, omit the `return` directive. When instructed in the remainder of this guide to add directives to the `server` block for HTTPS traffic, add them to this block instead. 
+ + ```nginx + # In the 'http' block + server { + listen 80; + server_name example.com; + + # Redirect all HTTP requests to HTTPS + location / { + return 301 https://$server_name$request_uri; + } + } + ``` + + Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [return](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#return) + +For more information on configuring SSL/TLS, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/security-controls/terminating-ssl-http.md" >}}) and the reference documentation for the HTTP [SSL/TLS](https://nginx.org/en/docs/http/ngx_http_ssl_module.html) module. + + +### Configuring Basic Load Balancing + +To configure load balancing, you first create a named _upstream group_, which lists the backend servers among which client requests are distributed. You then set up NGINX Open Source or NGINX Plus as a reverse proxy and load balancer by referring to the upstream group in one or more `proxy_pass` directives. + +1. Configure an upstream group called **jboss** with two Wildfly application servers listening on port 8080, one on IP address 192.168.33.11 and the other on 192.168.33.12. + + ```nginx + # In the 'http' block + upstream jboss { + server 192.168.33.11:8080; + server 192.168.33.12:8080; + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + +2. In the `server` block for HTTPS traffic that we created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers), include these two [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) blocks: + + - The first one matches HTTPS requests in which the path starts with **/webapp/**, and proxies them to the **jboss** upstream group we created in the previous step. 
+ - The second one funnels all traffic to the first `location` block, by doing a temporary redirect of all requests for **"http://example.com/"**. + + ```nginx + # In the 'server' block for HTTPS traffic + location /webapp/ { + proxy_pass http://jboss; + } + + location = / { + return 302 /webapp/; + } + ``` + + Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [return](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#return) + + Note that these blocks handle only standard HTTPS traffic. If you want to load balance WebSocket traffic, you need to add another `location` block as described in [Configuring Proxy of WebSocket Traffic](#websocket). + +By default, NGINX Open Source and NGINX Plus use the Round Robin algorithm for load balancing among servers. The load balancer runs through the list of servers in the upstream group in order, forwarding each new request to the next server. In our example, the first request goes to 192.168.33.11, the second to 192.168.33.12, the third to 192.168.33.11, and so on. For information about the other available load‑balancing algorithms, see the NGINX Plus Admin Guide. + +In NGINX Plus, you can also set up dynamic reconfiguration of an upstream group when the set of backend servers changes, using DNS or an API; see [Enabling Dynamic Reconfiguration of Upstream Groups](#reconfiguration). + +For more information on proxying and load balancing, see [NGINX Reverse Proxy]({{< relref "../../admin-guide/web-server/reverse-proxy.md" >}}) and [HTTP Load Balancing]({{< relref "../../admin-guide/load-balancer/http-load-balancer.md" >}}) in the NGINX Plus Admin Guide, and the reference documentation for the HTTP [Proxy](https://nginx.org/en/docs/http/ngx_http_proxy_module.html) and [Upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html) modules. 
+ + +### Configuring Basic Session Persistence + +If your application requires basic session persistence (also known as _sticky sessions_), you can implement it in NGINX Open Source with the IP Hash load‑balancing algorithm. (NGINX Plus offers a more sophisticated form of session persistence, as described in [Configuring Advanced Session Persistence](#session-persistence-advanced).) + +With the IP Hash algorithm, for each request a hash based on the client's IP address is calculated and associated with one of the upstream servers. All requests with that hash are sent to that server, thus establishing session persistence. + +If the client has an IPv6 address, the hash is based on the entire address. If it has an IPv4 address, the hash is based on just the first three octets of the address. This is designed to optimize for ISP clients that are assigned IP addresses dynamically from a subnetwork (/24) range. However, it is not effective in these cases: + +- The majority of the traffic to your site is coming from one forward proxy or from clients on the same /24 network, because in that case IP Hash maps all clients to the same server. +- A client's IP address can change during the session, for example when a mobile client switches from a WiFi network to a cellular one. + +To configure session persistence in NGINX, add the `ip_hash` directive to the `upstream` block created in [Configuring Basic Load Balancing](#load-balancing-basic): + +```nginx +# In the 'http' block +upstream jboss { + ip_hash; + server 192.168.33.11:8080; + server 192.168.33.12:8080; +} +``` + +Directive documentation: [ip_hash](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ip_hash) + +You can also use the Hash load‑balancing method for session persistence, with the hash based on any combination of text and [NGINX variables](https://nginx.org/en/docs/varindex.html) you specify. For example, you can hash on full (four‑octet) client IP addresses with the following configuration. 
+ +```nginx +# In the 'http' block +upstream jboss { + hash $remote_addr; + server 192.168.33.11:8080; + server 192.168.33.12:8080; +} +``` + +Directive documentation: [hash](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#hash) + + +### Configuring Proxy of WebSocket Traffic + +The WebSocket protocol (defined in [RFC 6455](https://tools.ietf.org/html/rfc6455)) enables simultaneous two‑way communication over a single TCP connection between clients and servers, where each side can send data independently from the other. To initiate the WebSocket connection, the client sends a handshake request to the server, upgrading the request from standard HTTP to WebSocket. The connection is established if the handshake request passes validation, and the server accepts the request. When a WebSocket connection is created, a browser client can send data to a server while simultaneously receiving data from that server. + +The WebSocket protocol works out of the box on Wildfly app servers, so no additional Wildfly configuration is required. If you want NGINX Open Source or NGINX Plus to proxy WebSocket traffic to your Wildfly application servers, add the directives discussed in this section. + +NGINX Open Source and NGINX Plus by default use HTTP/1.0 for upstream connections. 
To be proxied correctly, WebSocket connections require HTTP/1.1 along with some other configuration directives that set HTTP headers: + +```nginx +# In the 'http' block +map $http_upgrade $connection_upgrade { + default upgrade; + '' close; +} + +# In the 'server' block for HTTPS traffic +location /wstunnel/ { + proxy_pass http://jboss; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; +} +``` + +Directive documentation: [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [map](https://nginx.org/en/docs/http/ngx_http_map_module.html#map), [proxy_http_version](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) + +The first `proxy_set_header` directive is needed because the `Upgrade` request header is hop-by-hop; that is, the HTTP specification explicitly forbids proxies from forwarding it. This directive overrides the prohibition. + +The second `proxy_set_header` directive sets the `Connection` header to a value that depends on the test in the `map` block: if the request has an `Upgrade` header, the `Connection` header is set to `upgrade`; otherwise, it is set to `close`. + +For more information about proxying WebSocket traffic, see [WebSocket proxying](https://nginx.org/en/docs/http/websocket.html) and [NGINX as a WebSocket Proxy](https://www.nginx.com/blog/websocket-nginx/). + + +### Configuring Content Caching + +Caching responses from your Wildfly app servers can both improve response time to clients and reduce load on the servers, because eligible responses are served immediately from the cache instead of being generated again on the server. 
There are a variety of useful directives that can be used to fine‑tune caching behavior; for a detailed discussion, see [A Guide to Caching with NGINX](https://www.nginx.com/blog/nginx-caching-guide/). + +One choice for caching is [Infinispan](https://infinispan.org/), an open source, distributed cache and key‑value NoSQL data store developed by Red Hat. Java application servers (including Wildfly and JBoss) can embed it as a library or use it as a service, and any non‑Java applications can use it as remote service through TCP/IP. + +Another alternative is to cache server responses on the NGINX Open Source host by creating this configuration: + +1. Include the `proxy_cache_path` directive to create the local disk directory **/tmp/NGINX_cache/** for use as a cache. The `keys_zone` parameter allocates 10 megabytes (MB) of shared memory for a zone called **backcache**, which is used to store cache keys and metadata such as usage timers. A 1‑MB zone can store data for about 8,000 keys. + + ```nginx + # In the 'http' block + proxy_cache_path /tmp/NGINX_cache/ keys_zone=backcache:10m; + ``` + + Directive documentation: [proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) + +2. In the `location` block that matches HTTPS requests in which the path starts with **/webapp/**, include the `proxy_cache` directive to reference the cache created in the previous step. 
+
+    ```nginx
+    # In the 'server' block for HTTPS traffic
+    location /webapp/ {
+        proxy_pass http://jboss;
+        proxy_cache backcache;
+    }
+    ```
+
+    Directive documentation: [proxy_cache](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass)
+
+For more complete information on caching, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}) and the reference documentation for the HTTP [Proxy](https://nginx.org/en/docs/http/ngx_http_proxy_module.html) module.
+
+
+### Configuring HTTP/2 Support
+
+HTTP/2 is fully supported in both NGINX Open Source 1.9.5 and later, and NGINX Plus R7 and later. As always, we recommend you run the latest version of software to take advantage of improvements and bug fixes.
+
+- If using NGINX Open Source, note that in version 1.9.5 and later the SPDY module is completely removed from the codebase and replaced with the [HTTP/2](https://nginx.org/en/docs/http/ngx_http_v2_module.html) module. After upgrading to version 1.9.5 or later, you can no longer configure NGINX Open Source to use SPDY. If you want to keep using SPDY, you need to compile NGINX Open Source from the sources in the [NGINX 1.8.x branch](https://nginx.org/en/download.html).
+
+- In NGINX Plus R8 and later, NGINX Plus supports HTTP/2 by default. (Support for SPDY is deprecated as of that release). Specifically:
+
+  In NGINX Plus R11 and later, the nginx-plus package continues to support HTTP/2 by default, but the nginx-plus-extras package available in previous releases is deprecated in favor of [dynamic modules](https://www.nginx.com/products/dynamic-modules/).
+
+  For NGINX Plus R8 through R10, the nginx-plus and nginx-plus-extras packages support HTTP/2 by default.
+
+  If using NGINX Plus R7, you must install the nginx-plus-http2 package instead of the nginx-plus or nginx-plus-extras package.
+ +To enable HTTP/2 support, add the `http2` directive in the `server` block for HTTPS traffic that we created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers), so that it looks like this: + +```nginx +# In the 'server' block for HTTPS traffic +listen 443 ssl; +http2 on; +``` + +Directive documentation: [http2](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2) + +To verify that HTTP/2 translation is working, you can use the "HTTP/2 and SPDY indicator" plug‑in available for [Google Chrome](https://chrome.google.com/webstore/detail/http2-and-spdy-indicator/mpbpobfflnpcgagjijhmgnchggcjblin?hl=en) and [Firefox](https://addons.mozilla.org/en-US/firefox/addon/http2-indicator/). + + +### Full Configuration for Basic Load Balancing + +The full configuration for basic load balancing appears here for your convenience. It goes in the `http` context. The complete file is available for [download](https://www.nginx.com/resource/conf/jboss-basic.conf) from the NGINX website. + +We recommend that you do not copy text directly from this document, but instead use the method described in [Creating and Modifying Configuration Files](#config-files) to include these directives in your configuration – add an `include` directive to the `http` context of the main **nginx.conf** file to read in the contents of /etc/nginx/conf.d/jboss-basic.conf. 
+
+```nginx
+proxy_cache_path /tmp/NGINX_cache/ keys_zone=backcache:10m;
+
+map $http_upgrade $connection_upgrade {
+    default upgrade;
+    '' close;
+}
+
+upstream jboss {
+    # Use IP Hash for session persistence
+    ip_hash;
+
+    # List of Wildfly application servers
+    server 192.168.33.11:8080;
+    server 192.168.33.12:8080;
+}
+
+server {
+    listen 80;
+    server_name example.com;
+
+    # Redirect all HTTP requests to HTTPS
+    location / {
+        return 301 https://$server_name$request_uri;
+    }
+}
+
+server {
+    listen 443 ssl;
+    http2 on;
+
+    server_name example.com;
+
+    ssl_certificate /etc/nginx/ssl/example.com.crt;
+    ssl_certificate_key /etc/nginx/ssl/example.com.key;
+    ssl_session_cache shared:SSL:1m;
+    ssl_prefer_server_ciphers on;
+
+    # Load balance requests for '/webapp/' across Wildfly application servers
+    location /webapp/ {
+        proxy_pass http://jboss;
+        proxy_cache backcache;
+    }
+
+    # Return a temporary redirect to '/webapp/' when user requests '/'
+    location = / {
+        return 302 /webapp/;
+    }
+
+    # WebSocket configuration
+    location /wstunnel/ {
+        proxy_pass http://jboss;
+        proxy_http_version 1.1;
+        proxy_set_header Upgrade $http_upgrade;
+        proxy_set_header Connection $connection_upgrade;
+    }
+}
+
+```
+
+
+## Configuring Enhanced Load Balancing with NGINX Plus
+
+This section explains how to configure enhanced load balancing with some of the extended features in NGINX Plus.
+
+**Note:** Before setting up the enhanced features described in this section, you must complete the instructions for basic load balancing in these two sections:
+
+- [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers)
+- [Configuring Basic Load Balancing](#load-balancing-basic)
+
+Except as noted, all optional basic features (described in the other subsections of [Configuring Basic Load Balancing in NGINX Open Source and NGINX Plus](#basic)) can be combined with the enhanced features described here.
+
+The features described in the following sections are all optional.
+ +- [Configuring Advanced Session Persistence](#session-persistence-advanced) +- [Configuring Application Health Checks](#health-checks) +- [Enabling Live Activity Monitoring](#live-activity-monitoring) +- [Enabling Dynamic Reconfiguration of Upstream Groups](#reconfiguration) + +The complete configuration file appears in [Full Configuration for Enhanced Load Balancing](#full-configuration-enhanced). + + +### Configuring Advanced Session Persistence + +NGINX Plus has more sophisticated session persistence methods available than NGINX Open Source, implemented in three variants of the `sticky` directive. In the following example, we add the `sticky learn` directive to the upstream group we created in [Configuring Basic Load Balancing](#load-balancing-basic). + +1. Remove or comment out the `ip_hash` directive, leaving only the `server` directives: + + ```nginx + # In the 'http' block + upstream jboss { + #ip_hash; + server 192.168.33.11:8080; + server 192.168.33.12:8080; + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + +2. Configure session persistence that uses the `sticky learn` directive to refer to the `JSESSIONID` cookie created by your Wildfly application as the session identifier. + + - The `create` and `lookup` parameters to `sticky learn` specify how new sessions are created and existing sessions are searched for, respectively. For new sessions, NGINX Plus sets the session identifier to the value of the `$upstream_cookie_JSESSIONID` variable, which captures the `JSESSIONID` cookie sent by the Wildfly application server. When checking for existing sessions, it uses the `JSESSIONID` cookie sent by the client (the `$cookie_JSESSIONID` variable) as the session identifier. 
+ + Both parameters can be specified more than once (each time with a different variable), in which case NGINX Plus uses the first non-empty variable for each one. + + - The `zone` argument creates a shared memory zone for storing information about sessions. The amount of memory allocated – here, 1 MB – determines how many sessions can be stored at a time (the number varies by platform). The name assigned to the zone – here, `client_sessions` – must be unique for each `sticky` directive. + + ```nginx + # In the 'http' block + upstream jboss { + server 192.168.33.11:8080; + server 192.168.33.12:8080; + sticky learn create=$upstream_cookie_JSESSIONID lookup=$cookie_JSESSIONID + zone=client_sessions:1m; + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [`sticky learn`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + +For more information on session persistence, see the NGINX Plus Admin Guide. + + +### Configuring Application Health Checks + +Health checks are out‑of‑band HTTP requests sent to a server at fixed intervals. They are used to determine whether a server is responsive and functioning correctly, without requiring an actual request from a client. + +Because the `health_check` directive is placed in the `location` block, we can enable different health checks for each application. + +1. In the `location` block that matches HTTPS requests in which the path starts with **/webapp/** (created in [Configuring Basic Load Balancing](#load-balancing-basic)), add the `health_check` directive. + + Here we configure NGINX Plus to send an out‑of‑band request for the top‑level URI **/** (slash) to each of the servers in the **jboss** upstream group every 5 seconds (the default URI and frequency). 
If a server does not respond correctly, it is marked down and NGINX Plus stops sending requests to it until it passes a subsequent health check. We include the `match` parameter so we can define a nondefault set of health‑check tests (we define them in the next step). + + ```nginx + # In the 'server' block for HTTPS traffic + location /webapp/ { + proxy_pass http://jboss; + proxy_cache backcache; + health_check match=jboss_check; + } + ``` + + Directive documentation: [health_check](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check), [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_cache](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) + +2. In the `http` context, include a `match` directive to define the tests that a server must pass to be considered functional. In this example, it must return status code `200`, the `Content-Type` response header must be `text/html`, and the response body must match the indicated regular expression. + + ```nginx + # In the 'http' block + match jboss_check { + status 200; + header Content-Type = text/html; + body ~ "Your WildFly 9 is running"; + } + ``` + + Directive documentation: [match](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#match) + +3. In the **jboss** upstream group, include the `zone` directive to define a shared memory zone that stores the group's configuration and run-time state, which are shared among worker processes. + + ```nginx + # In the 'http' block + upstream jboss { + zone jboss 64k; + server 192.168.33.11:8080; + server 192.168.33.12:8080; + # ... 
+ } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +NGINX Plus also has a slow‑start feature that is a useful auxiliary to health checks. When a failed server recovers, or a new server is added to the upstream group, NGINX Plus slowly ramps up the traffic to it over a defined period of time. This gives the server time to "warm up" without being overwhelmed by more connections than it can handle as it starts up. For more information, see the NGINX Plus Admin Guide. + +For example, to set a slow‑start period of 30 seconds for your Wildfly application servers, include the `slow_start` parameter to their `server` directives: + +```nginx +# In the 'upstream' block +server 192.168.33.11:8080 slow_start=30s; +server 192.168.33.12:8080 slow_start=30s; +``` + +Parameter documentation: [slow_start](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#slow_start) + +For information about customizing health checks, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/load-balancer/http-health-check.md" >}}). + + +### Enabling Live Activity Monitoring + +NGINX Plus includes a live activity monitoring interface that provides key load and performance metrics in real time, including TCP metrics in NGINX Plus R6 and later. Statistics are reported through a RESTful JSON interface, making it very easy to feed the data to a custom or third‑party monitoring tool. There is also a built‑in dashboard. Follow these instructions to deploy it. + +Dashboard tab in NGINX Plus live activity monitoring dashboard + +For more information about live activity monitoring, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/monitoring/live-activity-monitoring.md" >}}). 
+
+The quickest way to configure the module and the built‑in NGINX Plus dashboard is to download the sample configuration file from the NGINX website and modify it as necessary. For more complete instructions, see [Live Activity Monitoring of NGINX Plus in 3 Simple Steps](https://www.nginx.com/blog/live-activity-monitoring-nginx-plus-3-simple-steps/).
+
+1. Download the **status.conf** file to the NGINX Plus server:
+
+    ```none
+    # cd /etc/nginx/conf.d
+    # curl https://www.nginx.com/resource/conf/status.conf > status.conf
+    ```
+
+2. Include the file in the `http` context in the main configuration file (**/etc/nginx/nginx.conf**):
+
+    ```nginx
+    # In the 'http' block in nginx.conf
+    include conf.d/status.conf;
+    ```
+
+    Directive documentation: [include](https://nginx.org/en/docs/ngx_core_module.html#include)
+
+3. Customize the file for your deployment as specified by comments in the file. In particular, the default settings in the file allow anyone on any network to access the dashboard. We strongly recommend that you restrict access to the NGINX Plus API with one or more of the following methods:
+
+    - **IP address‑based access control lists (ACLs)**. In the sample configuration file, uncomment the `allow` and `deny` directives, and substitute the address of your administrative network for 10.0.0.0/8. Only users on the specified network can access the status page.
+
+      ```nginx
+      allow 10.0.0.0/8;
+      deny all;
+      ```
+
+      Directive documentation: [allow and deny](https://nginx.org/en/docs/http/ngx_http_access_module.html)
+
+    - **HTTP Basic authentication**. In the sample configuration file, uncomment the `auth_basic` and `auth_basic_user_file` directives and add user entries to the **/etc/nginx/users** file (for example, by using an [htpasswd generator](https://httpd.apache.org/docs/2.4/programs/htpasswd.html)). If you have an Apache installation, another option is to reuse an existing **htpasswd** file.
+
+      ```nginx
+      auth_basic on;
+      auth_basic_user_file /etc/nginx/users;
+      ```
+
+      Directive documentation: [auth_basic](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic), [auth_basic_user_file](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic_user_file)
+
+    - **Client certificates**, which are part of a complete configuration of SSL/TLS. For more information, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/security-controls/terminating-ssl-http.md" >}}) and the reference documentation for the HTTP [SSL/TLS](https://nginx.org/en/docs/http/ngx_http_ssl_module.html) module.
+
+    - **Firewall**. Configure your firewall to disallow outside access to the port for the dashboard (8080 in the sample configuration file).
+
+4. In each upstream group that you want to monitor, include the `zone` directive to define a shared memory zone that stores the group's configuration and run-time state, which are shared among worker processes.
+
+    For example, to monitor your Wildfly application servers, add the `zone` directive to the **jboss** upstream group (if you configured [application health checks](#health-checks), you already made this change).
+
+    ```nginx
+    # In the 'http' block
+    upstream jboss {
+        zone jboss 64k;
+        server 192.168.33.11:8080;
+        server 192.168.33.12:8080;
+        # ...
+    }
+    ```
+
+    Directive documentation: [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone)
+
+5. 
In the `server` block for HTTPS traffic (created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers)), add the `status_zone` directive: + + ```nginx + # In the 'server' block for HTTPS traffic + status_zone jboss; + ``` + + Directive documentation: [status_zone](https://nginx.org/en/docs/http/ngx_http_status_module.html#status_zone) + +When you reload the NGINX Plus configuration file, for example by running the `nginx -s reload` command, the NGINX Plus dashboard is available immediately at http://_nginx-plus-server-address_:8080. + + +### Enabling Dynamic Reconfiguration of Upstream Groups + +With NGINX Plus, you can reconfigure load‑balanced server groups (both HTTP and TCP/UDP) dynamically using either the Domain Name System (DNS) or the NGINX Plus API introduced in NGINX Plus R13. See the NGINX Plus Admin Guide for a more detailed discussion of the DNS and [API]({{< relref "../../admin-guide/load-balancer/dynamic-configuration-api.md" >}}) methods. + +#### Configuring the API Method + +To enable dynamic reconfiguration of your upstream group of Wildfly app servers using the NGINX Plus API, you need to grant secured access to it. You can use the API to add or remove servers, dynamically alter their weights, and set their status as `primary`, `backup`, or `down`. + +1. Include the `zone` directive in the **jboss** upstream group to create a shared memory zone for storing the group's configuration and run‑time state, which makes the information available to all worker processes. (If you configured [application health checks](#health-checks) or [live activity monitoring](#live-activity-monitoring), you already made this change.) + + ```nginx + # In the 'http' block + upstream jboss { + zone jboss 64k; + server 192.168.33.11:8080; + server 192.168.33.12:8080; + # ... + } + ``` + + Directive documentation: [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +2. 
In the `server` block for HTTPS traffic (created in [Configuring Virtual Servers for HTTP and HTTPS Traffic](#virtual-servers)), add a new `location` block for the NGINX Plus API, which enables dynamic reconfiguration among other features. It contains the `api` directive (**api** is also the conventional name for the location, as used here).
+
+    (If you configured [live activity monitoring](#live-activity-monitoring) by downloading the **status.conf** file, it already includes this block.)
+
+    We strongly recommend that you restrict access to the location so that only authorized administrators can access the NGINX Plus API. The [allow and deny](https://nginx.org/en/docs/http/ngx_http_access_module.html) directives in the following example permit access only from the localhost address (127.0.0.1).
+
+    ```nginx
+    # In the 'server' block for HTTPS traffic
+    location /api {
+        api write=on;
+        allow 127.0.0.1;
+        deny all;
+    }
+    ```
+
+    Directive documentation: [allow and deny](https://nginx.org/en/docs/http/ngx_http_access_module.html), [api](https://nginx.org/en/docs/http/ngx_http_api_module.html#api)
+
+#### Configuring the DNS Method
+
+In the `http` block, add the `resolver` directive pointing to your DNS server. In the **jboss** `upstream` block add the `resolve` parameter to the `server` directive, which instructs NGINX Plus to periodically re‑resolve the domain name (here, **example.com**) with DNS.
+
+Also include the `zone` directive in the `upstream` block to create a shared memory zone for storing the upstream group's configuration and run‑time state, which makes the information available to all worker processes. (If you configured [application health checks](#health-checks) or [live activity monitoring](#live-activity-monitoring), you already made this change.)
+ +```nginx +# In the 'http' block +resolver <IP-address-of-DNS-server>; + +upstream jboss { + zone jboss 64k; + server example.com resolve; +} +``` + +Directive and parameter documentation: [resolve](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#resolve), [resolver](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +[NGINX Plus Release 9](https://www.nginx.com/blog/nginx-plus-r9-released/#dns-srv) and later can also use the additional information in DNS `SRV` records, such as the port number. Add the `service` parameter to the `server` directive, along with the `resolve` parameter: + +```nginx +# In the 'http' block +resolver <IP-address-of-DNS-server>; + +upstream jboss { + zone jboss 64k; + server example.com service=http resolve; +} +``` + +Parameter documentation: [service](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#service) + + +### Full Configuration for Enhanced Load Balancing + +The full configuration for enhanced load balancing appears here for your convenience. It goes in the `http` context. The complete file is available for [download](https://www.nginx.com/resource/conf/jboss-enhanced.conf) from the NGINX website. + +We recommend that you do not copy text directly from this document, but instead use the method described in [Creating and Modifying Configuration Files](#config-files) to include these directives in your configuration – add an `include` directive to the `http` context of the main **nginx.conf** file to read in the contents of /etc/nginx/conf.d/jboss-enhanced.conf. + +**Note:** The `api` block in this configuration summary and the [downloadable](https://www.nginx.com/resource/conf/jboss-enhanced.conf) jboss-enhanced.conf file is for the [API method](#reconfiguration-api) of dynamic reconfiguration. If you want to use the [DNS method](#reconfiguration-dns) instead, make the appropriate changes to the block. 
(You can also remove or comment out the directives for the NGINX Plus API in that case, but they do not conflict with using the DNS method and enable features other than dynamic reconfiguration.) + +```nginx +proxy_cache_path /tmp/NGINX_cache/ keys_zone=backcache:10m; + +# WebSocket configuration +map $http_upgrade $connection_upgrade { + default upgrade; + '' close; +} + +# Application health checks +match jboss_check { + status 200; + header Content-Type = text/html; + body ~ "Your WildFly 9 is running"; + +} + +upstream jboss { + # Shared memory zone for application health checks, live activity + # monitoring, and dynamic reconfiguration + zone jboss 64k; + + # List of Wildfly application servers + server 192.168.33.11:8080 slow_start=30s; + server 192.168.33.12:8080 slow_start=30s; + + # Session persistence based on JSESSIONID + sticky learn create=$upstream_cookie_JSESSIONID + lookup=$cookie_JSESSIONID + zone=client_sessions:1m; +} + +server { + listen 80; + server_name example.com; + + # Redirect all HTTP requests to HTTPS + location / { + return 301 https://$server_name$request_uri; + } +} + +server { + listen 443 ssl; + http2 on; + + server_name example.com; + + # Required for live activity monitoring of HTTPS traffic + status_zone jboss; + ssl_certificate /etc/nginx/ssl/; + ssl_certificate_key /etc/nginx/ssl/; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + + # Load balance requests for '/webapp/' across Wildfly application servers + location /webapp/ { + proxy_pass http://jboss; + proxy_cache backcache; + + # Active health checks + health_check match=jboss_check; + } + + # Return a 302 redirect to '/webapp/' when user requests '/' + location = / { + return 302 /webapp/; + } + + # WebSocket configuration + location /wstunnel/ { + proxy_pass http://jboss; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + } + + # Secured access to the NGINX Plus API + location /api { 
+ api write=on; + allow 127.0.0.1; # Permit access from localhost + deny all; # Deny access from everywhere else + } +} + +``` + + +## Resources + +- [NGINX Plus Overview](https://www.nginx.com/products/nginx) +- [NGINX Plus Admin Guide]({{< relref "/nginx/admin-guide/_index.md" >}}) +- [NGINX Wiki](https://www.nginx.com/resources/wiki/) + +### Revision History + +- Version 6 (May 2024) – Update about HTTP/2 support (the [http2](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2) directive) +- Version 5 (April 2018) – Update naming to Wildfly, and information about metrics gathering with the NGINX Plus API +- Version 4 (December 2017) – Add instructions for DNS method of dynamic reconfiguration (NGINX Plus R14) +- Version 3 (April 2017) – Update about HTTP/2 support (NGINX Plus R11) +- Version 2 (January 2016) – Update about HTTP/2 support (NGINX Plus R8, NGINX Open Source 1.9.9) +- Version 1 (December 2015) – Initial version (NGINX Plus R7, NGINX Open Source 1.9.5) diff --git a/content/nginx/deployment-guides/microsoft-azure/_index.md b/content/nginx/deployment-guides/microsoft-azure/_index.md new file mode 100644 index 000000000..0260ec649 --- /dev/null +++ b/content/nginx/deployment-guides/microsoft-azure/_index.md @@ -0,0 +1,9 @@ +--- +description: Deployment guides for making F5 NGINX Plus highly available in the Microsoft + Azure cloud environment. 
+menu: + docs: + parent: NGINX Plus +title: Microsoft Azure +weight: 100 +--- diff --git a/content/nginx/deployment-guides/microsoft-azure/high-availability-standard-load-balancer.md b/content/nginx/deployment-guides/microsoft-azure/high-availability-standard-load-balancer.md new file mode 100644 index 000000000..755f3af21 --- /dev/null +++ b/content/nginx/deployment-guides/microsoft-azure/high-availability-standard-load-balancer.md @@ -0,0 +1,320 @@ +--- +description: Create a highly available active-active deployment of F5 NGINX Plus on Microsoft + Azure in combination with the Azure Standard Load Balancer. +docs: DOCS-457 +doctypes: +- task +title: Active-Active HA for NGINX Plus on Microsoft Azure Using the Azure Standard + Load Balancer +toc: true +weight: 100 +--- + +This guide explains how to use F5 NGINX Plus to complement the native load‑balancing options in the Microsoft Azure cloud. We show how to implement our recommended solution, which combines Azure's Standard Load Balancer for fast and efficient handling of Layer 4 traffic and NGINX Plus for advanced, Layer 7 features such as load balancing, caching, and content‑based routing. The combined Standard Load Balancer and NGINX Plus solution is fast, powerful, reliable, and likely to be relatively low‑cost. + +This guide explains how to set up Standard Load Balancer in front of a specific number of NGINX Plus load balancers. + +The [Appendix](#appendix) provides instructions for creating Azure virtual machines (VMs) with the names used in this guide, and installing and configuring the NGINX software on them. + + +## About Standard Load Balancer + +[Azure Standard Load Balancer](https://azure.microsoft.com/en-us/services/load-balancer/) works at Layer 4 (the connection level), quickly and efficiently handling both inbound and outbound traffic. Its developers say that it provides low latency, high throughput, and the ability to scale up to millions of TCP and UDP flows. 
(Because TCP is the transport protocol for HTTP, this translates to efficient handling of HTTP traffic, but without the processing of HTTP‑related data that NGINX Plus does.) + +The purpose, design, and operation of Standard Load Balancer are similar to the native Layer 4 load balancers in other cloud environments, such as [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) (NLB) in Amazon Web Services (AWS) and [Network Load Balancing](https://cloud.google.com/load-balancing/docs/network/) on the Google Cloud Platform (GCP). Like the solution described in this guide, we have solutions for high availability of NGINX Plus in [AWS]({{< relref "../amazon-web-services/high-availability-network-load-balancer.md" >}}) and the [GCP]({{< relref "../google-cloud-platform/high-availability-all-active.md" >}}). If you have previously implemented one of them, you'll find the process similar for Standard Load Balancer. + + +## About Traffic Manager + +Azure Traffic Manager is a DNS‑based traffic load balancer that optimally distributes traffic to services across global Azure regions. It uses DNS to direct client requests to the most appropriate service endpoint based on a traffic‑routing method and the health of the endpoints. An endpoint is any Internet‑facing service hosted inside or outside of Azure – in our case, the endpoints are the Standard Load Balancers that front NGINX Plus instances in the regions. Traffic Manager is resilient to failure, including the failure of an entire Azure region. + + +## About NGINX Plus + +NGINX Plus is complementary to Standard Load Balancer. Operating at Layer 7 (the application layer), it uses more advanced load‑balancing criteria, including schemes that rely on the content of requests and the results of NGINX Plus's [active health checks]({{< relref "../../admin-guide/load-balancer/http-health-check.md" >}}). 
+ +[NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) is the commercially supported version of [NGINX Open Source](https://nginx.org/en). NGINX Plus is a complete application delivery platform, extending the power of NGINX with a host of enterprise‑ready capabilities that enhance an AWS web application deployment and are instrumental to building web applications at scale. + +NGINX Plus provides both reverse‑proxy features and load‑balancing features, including: + +- [Full‑featured HTTP, TCP, and UDP load balancing](https://www.nginx.com/products/nginx/load-balancing/) +- [Intelligent session persistence](https://www.nginx.com/products/nginx/load-balancing/#session-persistence) +- [High‑performance reverse proxy]({{< relref "../../admin-guide/web-server/reverse-proxy.md" >}}) +- [Caching and offload of dynamic and static content]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}) +- [Adaptive streaming to deliver audio and video to any device](https://www.nginx.com/products/nginx/streaming-media/) +- [Application-aware health checks](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/) and [high availability](https://docs.nginx.com/nginx/admin-guide/high-availability/) +- [Advanced activity monitoring available via a dashboard or API](https://www.nginx.com/products/nginx/live-activity-monitoring/) +- [Management and real‑time configuration changes with DevOps‑friendly tools](https://www.nginx.com/products/nginx/load-balancing/#load-balancing-api) + + +## Solution Overview + +This guide covers how to set up Azure’s Standard Load Balancer and Traffic Manager to provide a highly available, cross‑region, active‑active deployment of NGINX Plus as the load balancer for NGINX Open Source web servers. + +The complete configuration of a Standard Load Balancer consists of a front‑end public IP address, a pool of backend addresses, a health probe, and one or more load‑balancing rules. 
+ +Standard Load Balancer uses a purpose‑built source network address translation (SNAT) algorithm to load balance TCP connections. By default, when you create a Standard Load Balancer, you must also allocate a public IP address for it. + +To distribute traffic to the Azure virtual machines (VMs) hosting your application, you create a backend address pool and specify the pool from within each VM’s NIC resource. + +Standard Load Balancer uses health probes to determine whether a backend instance (in this case, NGINX Plus) can accept new flows. Health probes work much the same as NGINX Plus health checks. For details, see the [Azure documentation](https://docs.microsoft.com/en-us/azure/load-balancer/load-balancer-custom-probe-overview). + +You also create a load‑balancing rule to define how traffic is distributed to the VMs, specifying the IP address and port of the front end that listens for incoming traffic and the name of the back‑end pool of application instances, along with the port number on which the instances listen. + +Topology diagram for high availability deployment of NGINX Plus in Azure + +In this guide, the back‑end pool for Standard Load Balancer consists of two NGINX Plus instances, which reverse proxy and load balance traffic to two sets of backend applications, which in turn are also highly available. This setup is then replicated in a different region to create a region failover. You can also use this guide to deploy a greater number of NGINX Plus or NGINX instances in as many regions as you wish. + + +## Prerequisites +These instructions assume you have the following: + +- An Azure [account](https://azure.microsoft.com/en-us/free/). +- An Azure [subscription](https://docs.microsoft.com/en-us/azure/azure-glossary-cloud-terminology?toc=/azure/virtual-network/toc.json#subscription). +- An Azure [resource group](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/overview#resource-groups), preferably dedicated to the HA solution. 
In this guide, it is called NGINX-Plus-HA. +- An Azure [virtual network](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-overview). +- Six Azure VMs, four running NGINX Open Source and two running NGINX Plus (in each region where you deploy the solution). You need a subscription for each NGINX Plus instance, either paid or a [30‑day free trial](https://www.nginx.com/free-trial-request). + + The [Appendix](#appendix) provides instructions for creating instances with the expected names, and installing and configuring the NGINX software. +- Familiarity with NGINX and NGINX Plus configuration syntax. Complete configuration snippets are provided, but not described in detail. + + +## Setting Up a Standard Load Balancer + +With NGINX Open Source and NGINX Plus installed and configured on the Azure VMs (see the [Appendix](#appendix)) in two different regions (or more if you wish), we’re ready to configure a Standard Load Balancer in each region for a highly available, active‑active NGINX Plus setup. + +- [Creating a Standard Load Balancer](#slb-create) +- [Configuring the Standard Load Balancer](#slb-configure) +- [Verifying Correct Operation](#slb-verify-operation) + + +### Creating a Standard Load Balancer + +1. Access the [Microsoft Azure portal](https://portal.azure.com/) (****) and sign in. + +2. Navigate to the **Load balancers** page. (One way is to click the menu icon at the left end of the Microsoft Azure title bar and select **Load balancers** from the menu.) + + Screenshot of navigating to the 'Load Balancers' page in Azure + +3. On the **Load balancers** page that opens, click the  Create load balancer  button (or **+ Add** in the upper left corner of the page). + +4. 
On the **Create load balancer** page that opens (to the **Basics** tab), enter the following values: + + - **Subscription** – Name of your subscription (NGINX-Plus-HA-subscription in this guide) + - **Resource group** – Name of your resource group (NGINX-Plus-HA in this guide) + - **Name** – Name of your Standard Load Balancer (lb in this guide) + - **Region** – Name selected from the drop‑down menu ((US) West US 2 in this guide) + - **Type** – Public + - **SKU** – Standard + - **Public IP address** – Create new + - **Public IP address name** – Name for the address (public\_ip\_lb in this guide) + - **Public IP address SKU** – Standard + - **Availability zone** – Zone‑redundant + - **Add a public IPv6 address** – No + + Screenshot of the 'Basics' tab for creating an Azure Standard Load Balancer + +4. If you want to apply one or more tags to the load balancer, click the Next : Tags > button. Otherwise, click the  Review + create  button. + +5. Review your settings (return to the **Basic** tab if corrections are necessary). Click the  Create  button. + + Screenshot of Azure 'Validation passed' page for creating a Standard Load Balancer + + A page like the following appears when deployment is complete. + + Screenshot of Azure 'Deployment complete' page for creating a Standard Load Balancer + + + +### Configuring the Standard Load Balancer + +1. If you are not already on the **Load balancers** page, click **Load balancers** in the left navigation column. + +2. Click the name of the load balancer in the **Name** column of the table (lb in this guide). + + Screenshot of Azure 'Load Balancers' page + + +3. The page that opens has the load balancer name in the upper left corner (**lb** in this guide). Click **Backend pools** in the left navigation column. + + Screenshot of selecting 'Backend pools' on details page for an Azure Standard Load Balancer + +4. On the lb | Backend Pools page that opens, click **+ Add** in the upper left corner of the main pane. + +5. 
On the Add backend pool page that opens, enter the following values, then click the  Add  button: + + - **Name** – Name of the new backend pool (lb\_backend_pool in this guide) + - **IP version** – IPv4 + - **Virtual machines** – ngx-plus-1 and ngx-plus-2 + + Screenshot of Azure 'Add backend pool' page for Standard Load Balancer + + After a few moments the virtual machines appear in the new backend pool. + +6. Click **Health probes** in the left navigation column, and then **+ Add** in the upper left corner of the main pane on the lb | Health probes page that opens. + +7. On the Add health probe page that opens, enter the following values, then click the  OK  button. + + - **Name** – Name of the new backend pool (lb\_probe in this guide) + - **Protocol** – HTTP or HTTPS + - **Port** – 80 or 443 + - **Path** – / + - **Interval** – 5 + - **Unhealthy threshold** – 2 + + Screenshot of Azure 'Add health probe' page for Standard Load Balancer + + After a few moments the new probe appears in the table on the lb | Health probes page. This probe queries the NGINX Plus landing page every five seconds to check whether NGINX Plus is running. + +8. Click Load balancing rules in the left navigation column, and then **+ Add** in the upper left corner of the main pane on the lb | Load balancing rules page that opens. + +9. On the Add load balancing rule page that opens, enter or select the following values, then click the  OK  button. 
+ + - **Name** – Name of the rule (lb\_rule in this guide) + - **IP version** – IPv4 + - **Frontend IP address** – The Standard Load Balancer's public IP address, as reported in the Public IP address field on the **Overview** tab of the Standard Load Balancer's page (for an example, see [Step 3](#slb-configure-lb-overview) above); in this guide it is 51.143.107.x (LoadBalancerFrontEnd) + - **Protocol** – TCP + - **Port** – 80 + - **Backend port** – 80 + - **Backend pool** – lb_backend + - **Health probe** – lb_probe (HTTP:80) + - **Session persistence** – None + - **Idle timeout (minutes)** – 4 + - **TCP reset** – Disabled + - **Floating IP (direct server return)** – Disabled + - **Create implicit outbound rules** – Yes + + Screenshot of Azure 'Add load balancing rule' page for Standard Load Balancer + + After a few moments the new rule appears in the table on the lb | Load balancing rules page. + + +### Verifying Correct Operation + +1. To verify that Standard Load Balancer is working correctly, open a new browser window and navigate to the IP address for the Standard Load Balancer front end, which appears in the Public IP address field on the **Overview** tab of the load balancer's page on the dashboard (for an example, see [Step 3](#slb-configure-lb-overview) of _Configuring the Standard Load Balancer_). + +2. The default Welcome to nginx! page indicates that the Standard Load Balancer has successfully forwarded a request to one of the two NGINX Plus instances. + + Screenshot of 'Welcome to nginx!' page that verifies correct configuration of an Azure Standard Load Balancer + +3. To verify that the NGINX Plus load balancer is working correctly, add **/application1** and then **/application2** to the public IP address. Pages like the following indicate that you have reached NGINX Open Source instances serving the two backend applications, **App 1** and **App 2**. 
+ + Screenshot of standard NGINX web server demo page from App 1 + + Screenshot of standard NGINX web server demo page from App 2 + + +## Setting Up Azure Traffic Manager + +Once you’ve tested that the Standard Load Balancer has been correctly deployed, you can provide even better availability and resilience by provisioning the complete setup (Standard Load Balancer, NGINX Plus load balancers, and NGINX Open Source web servers) in additional Azure regions. + +In this case, you need to set up Azure Traffic Manager for DNS‑based global server load balancing (GSLB) among the regions. This involves creating a DNS name for the Standard Load Balancer and registering it as an endpoint in Traffic Manager. + +1. Navigate to the Public IP addresses page. (One way is to enter Public IP addresses in the search field of the Azure title bar and select that value in the **Services** section of the resulting drop‑down menu.) + +2. Click the name of the Standard Load Balancer's public IP address in the **Name** column of the table (here it is public\_ip_lb). + + Screenshot of Azure 'Public IP addresses' page + +3. On the **public\_ip_lb** page that opens, click **Configuration** in the left navigation column. + +4. Enter the DNS name for the Standard Load Balancer in the DNS name label field. In this guide, we're accepting the default, public-ip-dns. + + Screenshot of Azure page for public IP address of a Standard Load Balancer + +5. Navigate to the Traffic Manager profiles tab. (One way is to enter Traffic Manager profiles in the search field of the Azure title bar and select that value in the **Services** section of the resulting drop‑down menu.) + +6. Click **+ Add** in the upper left corner of the page. + +7. On the Create Traffic Manager profile page that opens, enter or select the following values and click the  Create  button. 
+ + - **Name** – Name of the profile (ngx in this guide) + - **Routing method** – Performance + - **Subscription** – NGINX-Plus-HA-subscription in this guide + - **Resource group** – NGINX-Plus-HA in this guide + + _Azure-create-lb-create-Traffic-Manager-profile_ + Screenshot of Azure 'Create Traffic Manager profile' page + +8. It takes a few moments to create the profile. When it appears in the table on the Traffic Manager profiles page, click its name in the **Name** column. + +9. On the **ngx** page that opens, click **Endpoints** in the left navigation column, then **+ Add** in the main part of the page. + +10. On the **Add endpoint** window that opens, enter or select the following values and click the  Add  button. + + - **Type** – Azure endpoint + - **Name** – Endpoint name (ep-lb-west-us in this guide) + - **Target resource type** – Public IP address + - **Public IP address** – Name of the Standard Load Balancer's public IP address (public\_ip_lb (51.143.107.x) in this guide) + - **Custom Header settings** – None in this guide + + Screenshot of Azure 'Add endpoint' page + +11. For each additional region, repeat the steps in [Setting Up a Standard Load Balancer](#slb-set-up), and then the steps in this section. + + +## Appendix + +This Appendix provides links to instructions for creating Azure VMs with the names used in this guide, and then installing and configuring NGINX Open Source and NGINX Plus on them: + +- [Creating Azure VMs and Installing the NGINX Software](#create-vm-install-nginx) +- [Configuring NGINX Open Source on the Web Servers](#configure-web-servers) +- [Configuring NGINX Plus on the Load Balancers](#configure-load-balancers) + +After completing the instructions, you have completed the prerequisites for this guide and can continue to [Setting Up a Standard Load Balancer](#slb-set-up). 
+ + +### Creating Azure VMs and Installing the NGINX Software + +The deployment in this guide uses six VMs: two VMs running NGINX Plus that load balance traffic to four VMs running NGINX Open Source as a web server. The four NGINX Open Source VMs are deployed in two pairs, each pair running a different app. + +Step-by-step instructions for creating VMs and installing NGINX Open Source and NGINX Plus are provided in our deployment guide, [Creating Microsoft Azure Virtual Machines for NGINX Open Source and NGINX Plus]({{< relref "virtual-machines-for-nginx.md" >}}). + +**Note:** When installing NGINX Open Source or NGINX Plus, you connect to each instance over SSH. To save time, leave the SSH connection to each instance open after installing the software, for reuse when you configure it using the instructions referenced in the sections below. + +Assign the following names to the VMs, and then install the indicated NGINX software. + +- Four NGINX Open Source VMs: + - **App 1**: + - ngx-oss-app1-1 + - ngx-oss-app1-2 + - **App 2**: + - ngx-oss-app2-1 + - ngx-oss-app2-2 + +- Two NGINX Plus VMs: + - ngx-plus-1 + - ngx-plus-2 + +**Note:** The two NGINX Plus VMs must have a public IP address with same SKU type as the Standard Load Balancer you are creating (in this guide, **Standard**). Instructions are included in our deployment guide, [Creating Microsoft Azure Virtual Machines for NGINX Open Source and NGINX Plus]({{< relref "virtual-machines-for-nginx.md" >}}). + + +### Configuring NGINX Open Source on the Web Servers + +For the purposes of this guide, you configure the NGINX Open Source VMs as web servers that return a page specifying the server name, address, and other information. As an example, here’s the page returned by **App 1**: + +Screenshot of standard NGINX web server demo page from App 1 + +Step-by-step instructions are provided in our deployment guide, Setting Up an NGINX Demo Environment. 
+ +Complete the instructions on all four web servers: + +- Running **App 1**: + - ngx-oss-app1-1 + - ngx-oss-app1-2 +- Running **App 2**: + - ngx-oss-app2-1 + - ngx-oss-app2-2 + + +### Configuring NGINX Plus on the Load Balancers + +For the purposes of this guide, you configure the NGINX Plus VMs as load balancers to distribute requests to the NGINX Open Source web servers you set up in [Configuring NGINX Open Source on the Web Servers](#configure-web-servers). + +Step-by-step instructions are provided in our deployment guide, Setting Up an NGINX Demo Environment. + +Complete the instructions on both ngx-plus-1 and ngx-plus-2. + +### Revision History + +- Version 1 (September 2020) – Initial version (NGINX Plus Release 22) diff --git a/content/nginx/deployment-guides/microsoft-azure/virtual-machines-for-nginx.md b/content/nginx/deployment-guides/microsoft-azure/virtual-machines-for-nginx.md new file mode 100644 index 000000000..052a98d49 --- /dev/null +++ b/content/nginx/deployment-guides/microsoft-azure/virtual-machines-for-nginx.md @@ -0,0 +1,234 @@ +--- +description: Create Microsoft Azure virtual machines for running NGINX Open Source + and F5 NGINX Plus. +docs: DOCS-458 +doctypes: +- task +title: Creating Microsoft Azure Virtual Machines for NGINX Open Source and F5 NGINX Plus +toc: true +weight: 100 +--- + +These instructions explain how to create virtual machines (VMs) in the Microsoft Azure environment that are suitable for running NGINX Open Source and NGINX Plus. + +The names and other settings used in this guide are appropriate for the high‑availability deployment described in [Active-Active HA for NGINX Plus on Microsoft Azure Using the Azure Standard Load Balancer]({{< relref "high-availability-standard-load-balancer.md" >}}), but the VMs can be used for any purpose. 
+ +For NGINX Plus, a faster alternative is to purchase a prebuilt VM in the [Microsoft Azure Marketplace](https://azuremarketplace.microsoft.com/en-us/marketplace/apps?search=NGINX%20Plus) (several current operating systems are available). For instructions, see [Installing NGINX Plus on Microsoft Azure]({{< relref "../../admin-guide/installing-nginx/installing-nginx-plus-microsoft-azure.md" >}}). + + +## Prerequisites + +These instructions assume you have: + +- An Azure [account](https://azure.microsoft.com/en-us/free/). +- An Azure [subscription](https://docs.microsoft.com/en-us/azure/azure-glossary-cloud-terminology?toc=/azure/virtual-network/toc.json#subscription). +- An Azure [resource group](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview#resource-groups). In this guide, it is called NGINX-Plus-HA. +- An Azure [virtual network](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-overview). +- If using the instructions in [Automating Installation with Ansible](#automate-ansible), basic Linux system administration skills, including installation of Linux software from vendor‑supplied packages, and file creation and editing. + +In addition, to install NGINX software by following the linked instructions, you need: + +- An NGINX Plus subscription, either paid or a [30‑day free trial](https://www.nginx.com/free-trial-request), if you plan to install that product. +- `root` privilege on the hosts where NGINX Open Source and NGINX Plus are to be installed. If appropriate for your environment, prefix commands with the `sudo` command. + + +## Creating a Microsoft Azure Virtual Machine + +1. Access the [Microsoft Azure portal](https://portal.azure.com/) (****) and sign in. + +2. Click the **Virtual machines** icon. (If that icon doesn't appear at the top of your window, click the stacked‑lines icon in the upper left corner of the title bar and click **Virtual machines** in the navigation column that opens at left.) 
+ + screenshot of top navigation bar at Microsoft Azure portal + +3. On the **Virtual machines** page that opens, click **+ Add** in the upper left corner. + + screenshot of Azure 'Virtual machines' page + + +4. In the **Create a virtual machine** window that opens, enter the requested information on the **Basics** tab. In this guide, we're using the following values: + + - **Subscription** – NGINX-Plus-HA-subscription + - **Resource group** – NGINX-Plus-HA + - **Virtual machine name** – ngx-plus-1 + + The value ngx-plus-1 is one of the six used for VMs in [Active-Active HA for NGINX Plus on Microsoft Azure Using the Azure Standard Load Balancer]({{< relref "high-availability-standard-load-balancer.md" >}}). See Step 7 below for the other instance names. + + - **Region** – (US) West US 2 + - **Availability options** – No infrastructure redundancy required + + This option is sufficient for a demo like the one in this guide. For production deployments, you might want to select a more robust option; we recommend deploying a copy of each VM in a different Availability Zone. For more information, see the [Azure documentation](https://docs.microsoft.com/en-us/azure/availability-zones/az-overview). 
+ - **Image** – Ubuntu Server 18.04 LTS + - **Azure Spot instance** – No + - **Size** – B1s (click Select size to access the Select a VM size window, click the **B1s** row, and click the  Select  button to return to the **Basics** tab) + - **Authentication type** – SSH public key + - **Username** – nginx_azure + - **SSH public key source** – Generate new key pair (the other choices on the drop‑down menu are to use an existing key stored in Azure or an existing public key) + - **Key pair name** – nginx_key + - **Public inbound ports** – Allow selected ports + - **Select inbound ports** – Select from the drop-down menu: SSH (22) and HTTP (80), plus HTTPS (443) if you plan to configure NGINX and NGINX Plus for SSL/TLS + + screenshot of 'Basics' tab on Azure 'Create a virtual machine' page + + +5. If you are creating VMs to use in [Active-Active HA for NGINX Plus on Microsoft Azure Using the Azure Standard Load Balancer]({{< relref "high-availability-standard-load-balancer.md" >}}), the two NGINX Plus VMs in that deployment must have public IP addresses with SKU type **Standard** instead of the default **Basic**. + + For simplicity, we recommend allocating **Standard** public IP addresses for all six VMs used in the deployment. At the time of initial publication of this guide, the hourly cost for six such VMs was only $0.008 more than for six VMs with Basic addresses; for current pricing, see the [Microsoft documentation](https://azure.microsoft.com/en-us/pricing/details/ip-addresses/). + + To allocate a **Standard** public IP address, open the **Networking** tab on the **Create a virtual machine** window. Click Create new below the **Public IP** field. In the Create public IP address column that opens at right, click the **Standard** radio button under **SKU**. You can change the value in the **Name** field; here we are accepting the default created by Azure, ngx-plus-1-ip. Click the  OK  button. 
+ + screenshot of 'Networking' tab on Azure 'Create a virtual machine' page + +6. At this point, you have the option of selecting nondefault values on the **Disks**, **Networking**, **Management**, **Advanced**, and **Tags** tabs. For a demo like the one in this guide, for example, selecting Standard HDD for OS disk type on the **Disks** tab saves money compared to the default, Premium SSD. You might also want to create or apply tags to this VM, on the **Tags** tab. + + When you have completed your changes on all tabs, click the  Review + create  button at the bottom of the **Create a virtual machine** page. + + If all of your settings are valid, a summary of them appears under the **Validation passed** banner, as in the following screenshot. + + To change any settings, open the appropriate tab. If the settings are correct, click the  Create  button. + + If you chose in [Step 4](#create-vm_Basics) to generate a new key pair, a Generate new key pair window pops up. Click the  Download key and create private resource  button. + + screenshot of validation message on Azure 'Create a virtual machine' page + + It takes a few minutes for a VM to deploy. When it's ready, a summary of associated resources appears, as in the following screenshot. + + screenshot of Azure 'CreateVM-Canonical' page + + +7. If you are following these instructions to create the six VMs used in [Active-Active HA for NGINX Plus on Microsoft Azure Using the Azure Standard Load Balancer]({{< relref "high-availability-standard-load-balancer.md" >}}), their names are as follows: + + - ngx-plus-1 + - ngx-plus-2 + - ngx-oss-app1-1 + - ngx-oss-app1-2 + - ngx-oss-app2-1 + - ngx-oss-app2-2 + + For ngx-plus-2, it is probably simplest to repeat Steps 2 through 6 above (or purchase a second prebuilt VM in the [Microsoft Azure Marketplace](https://azuremarketplace.microsoft.com/en-us/marketplace/apps?search=NGINX%20Plus)). 
+ + For the NGINX Open Source VMs, you can create them individually using Steps 2 through 6. Alternatively, create them based on an Azure image. To do so, follow Steps 2 through 6 above to create a source VM (naming it nginx-oss), [install the NGINX Open Source software](#install-nginx) on it, and then follow the instructions in [Optional: Creating an NGINX Open Source Image](#create-nginx-oss-image). + + +## Connecting to a Virtual Machine + +To install and configure NGINX Open Source or NGINX Plus on a VM, you need to open a terminal window and connect to the VM over SSH. + +1. Navigate to the **Virtual machines** page on the Azure dashboard and click the VM's name in the **Name** column of the table. + + screenshot of Azure 'Virtual machines' page with list of VMs + +2. On the page that opens (ngx-plus-1 in this guide), note the VM's public IP address (in the Public IP address field in the right column). + + screenshot of details page for 'ngx-plus-1' VM in Azure + +3. Run this command to establish an SSH connection to the VM: + + ```shell + ssh -i <private-key-file> <username>@<public-IP-address> + ``` + + where + + - `<private-key-file>` is the name of the file containing the private key paired with the public key you entered in the SSH public key field in Step 4 of _Creating a Microsoft Azure Virtual Machine_. + - `<username>` is the name you entered in the **Username** field in Step 4 of _Creating a Microsoft Azure Virtual Machine_ (in this guide it is nginx_azure). + - `<public-IP-address>` is the address you looked up in the previous step. + + +## Installing NGINX Software + +Once you have established a connection with an instance, you can install the NGINX software on it. Follow the instructions in the NGINX Plus Admin Guide for [NGINX Open Source]({{< relref "../../admin-guide/installing-nginx/installing-nginx-open-source.md" >}}) and [NGINX Plus]({{< relref "../../admin-guide/installing-nginx/installing-nginx-plus.md" >}}). The [Admin Guide]({{< relref "/nginx/admin-guide/" >}}) also provides instructions for many maintenance tasks.
+ + +### Automating Installation with a Configuration Manager + +You can automate the installation of NGINX Open Source and NGINX Plus. Instructions for Ansible are provided below. For Chef and Puppet, see these articles on the NGINX blog: + +- [Installing NGINX and NGINX Plus with Chef](https://www.nginx.com/blog/installing-nginx-nginx-plus-chef/) +- [Deploying NGINX Plus for High Availability with Chef](https://www.nginx.com/blog/nginx-plus-high-availability-chef/) +- [Installing NGINX and NGINX Plus with Puppet](https://www.nginx.com/blog/installing-nginx-nginx-plus-puppet/) + + +#### Automating Installation with Ansible + +NGINX publishes a unified Ansible role for NGINX Open Source and NGINX Plus on [Ansible Galaxy](https://galaxy.ansible.com/nginxinc/nginx/) and [GitHub](https://github.com/nginxinc/ansible-role-nginx). Perform these steps to install and run it. + +1. [Connect to the VM](#connect-vm). + +2. Install Ansible. These commands are appropriate for Debian and Ubuntu systems: + + ```shell + apt update + apt install python-pip -y + pip install ansible + ``` + +3. Install the official Ansible role from NGINX: + + ```shell + ansible-galaxy install nginxinc.nginx + ``` + +4. (NGINX Plus only) Copy the nginx-repo.key and nginx-repo.crt files provided by NGINX to ~/.ssh/ngx-certs/. + +5. Create a file called **playbook.yml** with the following contents: + + ```none + --- + - hosts: localhost + become: true + roles: + - role: nginxinc.nginx + ``` + +5. Run the playbook: + + ```shell + ansible-playbook playbook.yml + ``` + + +## Optional: Creating an NGINX Open Source Image + +To streamline the process of installing NGINX Open Source on multiple VMs, you can create a Microsoft Azure image from an existing NGINX Open Source VM, and spin up additional instances of the image when needed. + +1. [Install NGINX Open Source](#install-nginx) on the source VM, if you haven't already. + +2. Navigate to the **Virtual machines** page, if you are not already there. + +2. 
In the list of VMs, click the name of the one to use as a source image (in this guide, we have called it ngx-oss). Remember that NGINX Open Source needs to be installed on it already. + +3. On the page that opens, click the **Capture** icon in the top navigation bar. + + screenshot of details page for 'nginx-oss' VM in Azure + +4. On the **Create image** page that opens, take note of the informational and warning banners and take any necessary action. Note in particular that if you use one of the VMs you created in [Creating a Microsoft Azure Virtual Machine](#create-vm) as the source for the image, you will need to re‑create a VM with that name. + + Then select the following values: + + - **Name** – Keep the current value. + - **Resource group** – Select the appropriate resource group from the drop‑down menu. Here it is NGINX-Plus-HA. + - **Automatically delete this virtual machine after creating the image** – We recommend checking the box, since you can't do anything more with the image anyway. + - **Zone resiliency** – On. + - **Type the virtual machine name** – Name of the source VM (ngx-oss in this guide). + + Click the  Create  button. + + screenshot of Azure 'Create Image' page + +### Creating a VM from the Image + +It takes a few moments for the image to be created. When it's ready, you can create VMs from it with NGINX Open Source already installed. + +1. Navigate to the **Images** page. (One method is to type images in the search box in the Microsoft Azure header bar and select that value in the **Services** section of the resulting drop‑down menu.) + + screenshot of Azure 'Images' page + +2. Click the image name in the table. On the page that opens, click **+ Create VM** in the top navigation bar.
+ + screenshot of details page for Azure 'ngx-plus-1-image' image + +The **Create a virtual machine** page that opens is the same as in Step 4 of Creating a Microsoft Azure Virtual Machine, except that some fields have hardcoded values derived from the image and the **Image** field has the name of the image instead of an operating system. Return to that [step](#create-vm_Basics) to complete the VM creation. + +### Revision History + +- Version 1 (September 2020) – Initial version (NGINX Plus Release 22) diff --git a/content/nginx/deployment-guides/migrate-hardware-adc/_index.md b/content/nginx/deployment-guides/migrate-hardware-adc/_index.md new file mode 100644 index 000000000..7288455a3 --- /dev/null +++ b/content/nginx/deployment-guides/migrate-hardware-adc/_index.md @@ -0,0 +1,9 @@ +--- +description: Deployment guides for migrating from hardware application delivery controllers + (ADCs) to F5 NGINX Plus. +menu: + docs: + parent: NGINX Plus +title: Migrate Hardware ADCs +weight: 100 +--- diff --git a/content/nginx/deployment-guides/migrate-hardware-adc/citrix-adc-configuration.md b/content/nginx/deployment-guides/migrate-hardware-adc/citrix-adc-configuration.md new file mode 100644 index 000000000..dec69b573 --- /dev/null +++ b/content/nginx/deployment-guides/migrate-hardware-adc/citrix-adc-configuration.md @@ -0,0 +1,351 @@ +--- +description: Migrate load-balancing configuration from Citrix ADC to F5 NGINX Plus for + equal performance at lower cost, using our syntax conversion examples. +docs: DOCS-459 +doctypes: +- task +title: Migrating Load Balancer Configuration from Citrix ADC to NGINX Plus +toc: true +weight: 100 +--- + +F5 NGINX Plus provides a flexible replacement for traditional hardware‑based [application delivery controllers](https://www.nginx.com/resources/glossary/application-delivery-controller/) (ADCs). 
As a software load balancer with a small footprint, NGINX Plus can be deployed just about anywhere – on bare metal, on a virtual machine, or in a container, and on‑premises or in public, private, and hybrid clouds. This guide explains how to migrate the Citrix ADC configuration for several common load‑balancer features to the NGINX Plus application delivery platform. It covers the most commonly used features to get you started quickly. + +**Note:** Citrix ADC was formerly called Citrix NetScaler. + +NGINX Plus and Citrix ADC both act as a full reverse proxy and load balancer, so that the client sees the load balancer as the application and the backend servers see the load balancer as the client. This allows for great control and fine‑grained manipulation of the traffic. This guide focuses on basic load balancing. For information on extending the configuration with Layer 7 logic and scripting, see the article on [migrating Layer 7 logic](https://www.nginx.com/blog/migrating-layer7-logic-f5-irules-citrix-policies-nginx-plus/) on the NGINX blog. It covers features such as content switching and request routing, rewriting, and redirection. + + +## About NGINX Open Source and NGINX Plus + +[NGINX Open Source](https://nginx.org/en) is an open source web server, reverse proxy, and load balancer that has grown in popularity in recent years due to its scalability. NGINX Open Source was first created to solve the C10K problem (serving 10,000 simultaneous connections on a single web server). Its features and performance have made it the go‑to solution at high‑performance sites – it's [the #1 web server at the 100,000 busiest websites in the world](https://w3techs.com/technologies/cross/web_server/ranking). + +[NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) is the commercially supported version of NGINX Open Source. 
NGINX Plus is a complete software load balancer and application delivery platform, extending the power of NGINX Open Source with a host of enterprise‑ready capabilities that are instrumental to building web applications at scale: + +- [Full‑featured HTTP, TCP, and UDP load balancing](https://www.nginx.com/products/nginx/load-balancing/) +- [Intelligent session persistence](https://www.nginx.com/products/nginx/load-balancing/#session-persistence) +- [High‑performance reverse proxy]({{< relref "../../admin-guide/web-server/reverse-proxy.md" >}}) +- [Caching and offload of dynamic and static content]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}) +- [Adaptive streaming to deliver audio and video to any device](https://www.nginx.com/products/nginx/streaming-media/) +- [Application-aware health checks](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/) and [high availability](https://docs.nginx.com/nginx/admin-guide/high-availability/) +- [Advanced activity monitoring available via a dashboard or API](https://www.nginx.com/products/nginx/live-activity-monitoring/) +- [Management and real‑time configuration changes with DevOps‑friendly tools](https://www.nginx.com/products/nginx/load-balancing/#load-balancing-api) + + +## Prerequisites + +This guide assumes you are familiar with Citrix ADC concepts and CLI configuration commands. + +Familiarity with basic NGINX software concepts and directives is also helpful; links to documentation are provided, but the guide does not explain NGINX Plus functioning in depth. + + +## Mapping Citrix ADC Networking Concepts to NGINX Plus + +The networking configuration for Citrix ADC defines three types of IP addresses, which can be easily mapped to NGINX Plus: + +- **Citrix ADC IP address (NSIP)** – Management IP address of a specific Citrix ADC appliance. The NGINX Plus equivalent is the host IP address of the NGINX Plus instance. 
+- **Subnet IP address (SNIP)** – The source (client) IP address seen by backend servers in the load‑balancing configuration. By default, the NGINX Plus equivalent is the same as for the NSIP: the host IP address of the NGINX Plus instance. + + Both NGINX Plus and Citrix ADC use a routing table to choose the best IP address to use. With Citrix ADC, you manage the routing table in the Citrix ADC CLI or GUI, but with NGINX Plus you need to edit the system‑level routing for your Linux or FreeBSD OS. You can also use the [proxy_bind](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_bind) directive in the NGINX Plus configuration to specify the source address used for a specific application. + +- **Virtual IP address (VIP)** – Address advertised to clients for the service provided by the backend servers. The VIP functions in the same way for both Citrix ADC and NGINX Plus: if the primary device or instance fails, the VIP address is reassigned to the secondary. + + +## Converting Citrix ADC Load Balancer Configuration to NGINX Plus + +Citrix ADC uses a CLI for configuration. Even changes made in the GUI are translated internally to CLI commands, so we'll represent Citrix ADC configuration in the CLI format. + +NGINX Plus instead defines configuration with directives in a text file. The following sections explain how to convert Citrix ADC configuration to NGINX Plus for these entities and features: + +- [Virtual Servers](#virtual-servers) +- [SSL/TLS Termination](#ssl) +- [Service Group Entities](#service-group) +- [Session Persistence](#session-persistence) +- [Monitors (Health Checks)](#health-checks) +- [Summary of Converted Load Balancer Configuration](#configuration-summary) + + + +### Virtual Servers + +Citrix ADC uses only the combination of IP address and port to select the virtual server for a request. If you want to consider information in the `Host` header as well, you use AppExpert policies or a Content Switch Virtual Server to inspect it. 
+ +In contrast, the NGINX Plus definition of a virtual server in a [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server) block can include both IP address‑port combinations and values in the `Host` header. Include the [server_name](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) directive in that block to specify the values to match in the `Host` header. + +The list of parameters to the `server_name` directive can include multiple hostnames, wildcards, and regular expressions. You can include multiple `server_name` directives and multiple listening IP address‑port combinations within one NGINX Plus `server` block. For more information on using `Host` and the `server_name` directive instead of IP addresses, see [Server names](https://nginx.org/en/docs/http/server_names.html) at **nginx.org**. + +The following sample virtual server configuration is for a hostname that ends in **.example.com**. + +#### Citrix ADC + +```none +add lb vserver myvserver HTTP 10.0.0.99 80 +``` + +#### NGINX Plus + +```nginx +server { + listen 10.0.0.99:80; + server_name .example.com; + #... +} +``` + +Directive documentation: [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen), [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server), [server_name](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) + + + +### SSL/TLS Termination + +Handling SSL/TLS termination is a common use case for ADC load balancers. Both Citrix ADC and NGINX Plus use OpenSSL libraries to perform the encryption/decryption. NGINX Plus uses the system‑level libraries, so the OpenSSL version is determined by the OS. Citrix ADC uses a modified version of OpenSSL included within its firmware. + +To define the SSL/TLS key and certificate, NGINX Plus uses the `ssl_certificate` and `ssl_certificate_key` directives. The following load‑balancer configuration examples offload SSL/TLS termination from the backend servers. 
+ +#### Citrix ADC + +```none +add ssl certKey test.crt -cert test.crt -key test.key +add lb vserver mysslvserver SSL 10.0.0.98 443 +bind ssl vserver mysslvserver -certkeyName test.crt +``` + +#### NGINX Plus + +```nginx +server { + listen 10.0.0.98:443 ssl; + ssl_certificate test.crt; + ssl_certificate_key test.key; + #... +} +``` + +Directive documentation: [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen), [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server), [ssl_certificate and ssl_certificate_key](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate) + + + +### Service Group Entities + +It is straightforward to migrate the Citrix ADC entities that make up Services and Service Groups to NGINX Plus. Citrix ADC uses three major entity types: + +- **Server** – IP address or hostname of a specific backend server +- **Service** – Association of a server entity with a listening port and monitor +- **Service Group** – Association of a pool of server entities and listening ports with a monitor + +NGINX Plus uses the `upstream` block to represent a pool of backend application servers. In the most basic configuration, there is a `server` directive in the `upstream` block for each server in the pool, specifying its IP address or hostname. + +The following load‑balancer configuration examples define a server pool called **myapp** with three servers in it. 
+ +#### Citrix ADC + +```none +add serviceGroup myapp HTTP +bind serviceGroup myapp 10.0.0.100 80 +bind serviceGroup myapp 10.0.0.101 80 +bind serviceGroup myapp 10.0.0.102 80 +``` + +#### NGINX Plus + +```nginx +upstream myapp { + server 10.0.0.100:80; + server 10.0.0.101:80; + server 10.0.0.102:80; +} +``` + +Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + + + +### Session Persistence + +Session persistence is critical for stateful applications and is helpful for continuous delivery use cases. Citrix ADC and NGINX Plus handle session persistence in a similar way. + +If NGINX Plus' [Sticky Cookie](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky) method is compatible with the application, it's a good choice, as it is simple to configure and handles failover well. It works just like the _cookie insert_ method in Citrix ADC: the load balancer adds a session cookie to the first response from a backend server to a given client. The client then includes the cookie in subsequent requests and the load balancer uses it to route the request to the same server. This method doesn't require the load balancer to maintain information about session state, because the session data is stored in the client‑side cookie. + +A logical difference between the products is that Citrix ADC defines session persistence in the configuration for a virtual server, whereas NGINX Plus defines it in the context of the backend server group (`upstream` block). + +The following load‑balancer configuration examples set up cookie‑based session persistence.
+ +#### Citrix ADC - Cookie-based + +```none +add lb vserver mysslvserver SSL 10.0.0.91 443 -persistenceType COOKIEINSERT -timeout 60 -cookieName mysession +``` + +#### NGINX Plus - Cookie-based + +```nginx +upstream myapp { + server 10.0.0.100:80; + server 10.0.0.101:80; + server 10.0.0.102:80; + sticky cookie mysession expires=1h; +} +``` + +Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [`sticky cookie`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + +Another session persistence method available in both products takes advantage of a cookie or other token created by the session participants, such as a `JSESSIONID`. NGINX Plus calls this the [Sticky Learn](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky) method. The load balancer maintains a table in memory to map each cookie to a specific backend server. + +The following load‑balancer configuration examples set up session persistence based on the `JSESSIONID` found in an existing cookie. 
+ +#### Citrix ADC - JSESSIONID-based + +```none +set lb vserver mysslvserver -persistencetype RULE -rule 'HTTP.REQ.COOKIE.VALUE("jsessionid")' -resRule 'HTTP.RES.SET_COOKIE.COOKIE("jsessionid")' +``` + +#### NGINX Plus - JSESSIONID-based + +```nginx +upstream myapp { + server 10.0.0.100:80; + server 10.0.0.101:80; + server 10.0.0.102:80; + sticky learn create=$upstream_cookie_jsessionid + lookup=$cookie_jsessionid + zone=client_sessions:1m; +} +``` + +Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [`sticky learn`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + +The third NGINX Plus session persistence method, [Sticky Route](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky), is comparable to Citrix ADC's [custom server ID persistence](https://docs.citrix.com/en-us/citrix-adc/12-1/load-balancing/load-balancing-persistence/custom-server-id-persistence.html). In this method a particular backend server is specified in each request. + + + +### Monitors (Health Checks) + +Citrix ADC uses the term _monitor_ for what NGINX Plus calls a _health check_. You associate a monitor directly with a Citrix ADC service or service group, whereas an NGINX Plus health check is defined in a `location` block. + +With the following load‑balancer configuration examples, the load balancer sends a request for **/** to backend servers; by default, Citrix ADC sends `HEAD` requests and NGINX Plus sends `GET` requests. Citrix ADC marks the server as healthy if it returns response code `200`, while with this default configuration NGINX Plus accepts any `2xx` or `3xx` code. 
+ +#### Citrix ADC - Default health check + +```none +add lb monitor httphealth HTTP -respCode 200 -httpRequest "HEAD /" +bind serviceGroup myapp -monitorName httphealth +``` + +#### NGINX Plus - Default health check + +```nginx +location / { + proxy_pass http://myapp; + health_check; +} +``` + +Directive documentation: [health_check](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check), [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass) + +It's also possible with both products to define additional characteristics of the request or response. The following examples mark a backend server as healthy if it returns response code `200` and the message body includes the text `Welcome to nginx!`. + +#### Citrix ADC - Custom health check + +```none +add lb monitor httphealth-ecv HTTP-ECV -send "GET /" -recv "Welcome to nginx!" +bind serviceGroup myapp -monitorName httphealth-ecv +``` + +#### NGINX Plus - Custom health check + +```nginx +match server_ok { + status 200; + body ~ "Welcome to nginx!"; +} +server { + #... + location / { + proxy_pass http://myapp; + health_check match=server_ok; + } +} +``` + +Directive documentation: [health_check](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check), [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [match](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#match), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server) + +For more information about NGINX Plus health checks, see the [NGINX Plus Admin Guide]({{< relref "../../admin-guide/load-balancer/http-health-check.md" >}}). 
+ + + +### Summary of Converted Load Balancer Configuration + +The following examples bring together the commands and directives from the preceding sections. The NGINX Plus configuration includes some additional directives not discussed above: + +- [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) – Defines a shared memory zone used for health checks +- [proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) – Ensures that requests forwarded to the backend servers include the `Host` header, set to the value extracted from that header in the client request +- [proxy_http_version](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version) – Specifies HTTP version 1.1 for connections to the backend servers + +#### Citrix ADC + +```none +add ssl certKey test.crt -cert test.crt -key test.key +add lb vserver myssl SSL 10.0.0.98 443 -persistenceType COOKIEINSERT -timeout 60 -cookieName mysession +bind ssl vserver myssl -certkeyName test.crt +add serviceGroup myapp HTTP +bind serviceGroup myapp 10.0.0.100 80 +bind serviceGroup myapp 10.0.0.101 80 +bind serviceGroup myapp 10.0.0.102 80 +add lb monitor httphealth HTTP -respCode 200 -httpRequest "HEAD /" +bind serviceGroup myapp -monitorName httphealth +bind lb vserver myssl myapp +``` + +#### NGINX Plus + +```nginx +upstream myapp { + zone myapp 64k; + server 10.0.0.100:80; + server 10.0.0.101:80; + server 10.0.0.102:80; + sticky cookie mysession expires=1h; +} + +server { + listen 10.0.0.98:443 ssl default_server; + ssl_certificate test.crt; + ssl_certificate_key test.key; + proxy_set_header Host $host; + location / { + proxy_pass http://myapp; + health_check; + proxy_http_version 1.1; + } +} +``` + + +## Configuring High Availability + +NGINX Plus and Citrix ADC handle high availability (HA) in similar but slightly different ways. + +Citrix ADC handles the monitoring and failover of the VIP in a proprietary way.
+ + For [on‑premises deployments]({{< relref "../../admin-guide/high-availability/ha-keepalived.md" >}}), NGINX Plus uses a separate software package called **nginx-ha-keepalived** to handle the VIP and the failover process for an active‑passive pair of NGINX Plus servers. The package implements the VRRP protocol to handle the VIP. Limited [active‑active]({{< relref "../../admin-guide/high-availability/ha-keepalived-nodes.md" >}}) scenarios are also possible with the nginx-ha-keepalived package. + +Solutions for high availability of NGINX Plus in cloud environments are also available, including these: + +- [Active‑Active HA for NGINX Plus on AWS Using AWS Network Load Balancer]({{< relref "../amazon-web-services/high-availability-network-load-balancer.md" >}}) +- [Active‑Passive HA for NGINX Plus on AWS Using Elastic IP Addresses]({{< relref "../amazon-web-services/high-availability-keepalived.md" >}}) +- [All‑Active HA for NGINX Plus on the Google Cloud Platform]({{< relref "../google-cloud-platform/high-availability-all-active.md" >}}) + + +## Logging in Citrix ADC and NGINX Plus + +Logging and monitoring are important supporting functionality for load balancing. Both NGINX Plus and Citrix ADC support logging. + +Citrix ADC logs errors in its _event log_ and NGINX Plus in its [_error log_](https://nginx.org/en/docs/ngx_core_module.html#error_log). By default, Citrix ADC does not log individual requests, but can be configured to do so, using a separate weblog client. NGINX Plus has an [_access log_](https://nginx.org/en/docs/http/ngx_http_log_module.html) for which you can define customized formats to log many metrics (as captured in [variables](https://nginx.org/en/docs/varindex.html)) from both requests and responses. 
+ +The [NGINX Plus API](https://nginx.org/en/docs/http/ngx_http_api_module.html) module collects numerous statistics, which you can access via the API, display on the built‑in live activity monitoring dashboard, or pass to third‑party monitoring tools. For more detail on logging and monitoring see the [NGINX Plus Admin Guide]({{< relref "/nginx/admin-guide/monitoring/_index.md" >}}). + +### Revision History + +- Version 3 (April 2019) – Product name changed to Citrix ADC +- Version 2 (April 2018) – Updated information about high availability and the NGINX Plus API (NGINX Plus R13, NGINX Open Source 1.13.4) +- Version 1 (November 2016) – Initial version (NGINX Plus R11, NGINX 1.11.5) diff --git a/content/nginx/deployment-guides/migrate-hardware-adc/f5-big-ip-configuration.md b/content/nginx/deployment-guides/migrate-hardware-adc/f5-big-ip-configuration.md new file mode 100644 index 000000000..f2db9e017 --- /dev/null +++ b/content/nginx/deployment-guides/migrate-hardware-adc/f5-big-ip-configuration.md @@ -0,0 +1,527 @@ +--- +description: Migrate load-balancing configuration from F5 BIG-IP LTM to NGINX Plus, + using our syntax conversion examples. +docs: DOCS-460 +doctypes: +- task +title: Migrating Load Balancer Configuration from F5 BIG-IP LTM to F5 NGINX Plus +toc: true +weight: 100 +--- + +F5 NGINX Plus provides a flexible replacement for traditional hardware‑based [application delivery controllers](https://www.nginx.com/resources/glossary/application-delivery-controller/) (ADCs). NGINX Plus is a small software package that can be installed just about anywhere – on bare metal, a virtual machine, or a container, and on‑premises or in public, private, and hybrid clouds – while providing the same level of application delivery, high availability, and security offered by legacy ADCs. 
This guide explains how to migrate an F5 BIG-IP Local Traffic Manager (LTM) configuration to the NGINX Plus software application delivery platform, and covers the most commonly used features and configurations to get you started quickly on your migration. + +NGINX Plus and BIG-IP LTM both act as a full reverse proxy and load balancer, so that the client sees the load balancer as the application and the backend servers see the load balancer as the client. This allows for great control and fine‑grained manipulation of the traffic. This guide focuses on basic load balancing. For information on extending the configuration with Layer 7 logic and scripting, see the post about [migrating Layer 7 logic](https://www.nginx.com/blog/migrating-layer7-logic-f5-irules-citrix-policies-nginx-plus/) on the NGINX blog. It covers features such as content switching and request routing, rewriting, and redirection. + + +## About NGINX Open Source and NGINX Plus + +[NGINX Open Source](https://nginx.org/en) is an open source web server, reverse proxy, and load balancer that has grown in popularity in recent years due to its scalability. NGINX Open Source was first created to solve the C10K problem (serving 10,000 simultaneous connections on a single web server). Its features and performance have made it the go‑to solution at high‑performance sites – it's [the #1 web server at the 100,000 busiest websites in the world](https://w3techs.com/technologies/cross/web_server/ranking). + +[NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) is the commercially supported version of NGINX Open Source. 
NGINX Plus is a complete software load balancer and application delivery platform, extending the power of NGINX Open Source with a host of enterprise‑ready capabilities that are instrumental to building web applications at scale: + +- [Full‑featured HTTP, TCP, and UDP load balancing](https://www.nginx.com/products/nginx/load-balancing/) +- [Intelligent session persistence](https://www.nginx.com/products/nginx/load-balancing/#session-persistence) +- [High‑performance reverse proxy]({{< relref "../../admin-guide/web-server/reverse-proxy.md" >}}) +- [Caching and offload of dynamic and static content]({{< relref "../../admin-guide/content-cache/content-caching.md" >}}) +- [Adaptive streaming to deliver audio and video to any device](https://www.nginx.com/products/nginx/streaming-media/) +- [Application-aware health checks](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/) and [high availability](https://docs.nginx.com/nginx/admin-guide/high-availability/) +- [Advanced activity monitoring available via a dashboard or API](https://www.nginx.com/products/nginx/live-activity-monitoring/) +- [Management and real‑time configuration changes with DevOps‑friendly tools](https://www.nginx.com/products/nginx/load-balancing/#load-balancing-api) + + + +## NGINX Plus Deployment Scenarios + +Architecturally speaking, NGINX Plus differs from traditional ADCs in deployment location and function. Typical hardware‑based ADCs are usually deployed at the edge of the network and act as a front‑door entry point for all application traffic. It's not uncommon to see a large hardware ADC straddle the public and private DMZs, assuming the large burden of processing 100% of the traffic as it comes into the network. You often see ADCs in this environment performing all functions related to traffic flow for all applications – security, availability, optimization, authentication, etc. – requiring extremely large and powerful hardware appliances. 
The downside to this model is that the ADC is always stationary at the "front door" of the network. + +As they update their infrastructure and approach to application delivery, many companies are paring down the hardware ADC functionality at the edge and moving to a more distributed application model. Because the legacy hardware ADC is already sitting at the edge of the network it can continue to handle all ingress traffic management, directing application traffic to the appropriate NGINX Plus instances for each application type. NGINX Plus then handles traffic for each application type to provide application‑centric load balancing and high availability throughout the network, both on‑ and off‑premises. NGINX Plus is deployed closer to the application and is able to manage all traffic specific to each application type. + +In one architecture for modernizing application delivery infrastructure, hardware ADCs on the edge of the network pass application traffic to NGINX Plus for load balancing +_NGINX Plus can run behind hardware ADCs to handle application traffic_ + +Other companies are completely replacing their stationary hardware ADC appliances at the network edge with NGINX Plus, providing the same level of application delivery at the edge of the network. + +In the most flexible architecture for modern application delivery, NGINX Plus completely replaces hardware application delivery controllers +_NGINX Plus can completely replace hardware ADCs to handle all traffic entering the network_ + + +## Prerequisites + +This guide assumes you are familiar with F5 BIG-IP LTM concepts and CLI configuration commands. Familiarity with basic NGINX software concepts and directives is also helpful; links to documentation are provided, but the guide does not explain NGINX Plus functioning in depth. 
+ + +## Mapping F5 BIG-IP LTM Networking Concepts to NGINX Plus + +- [Network Architecture](#mapping-network) +- [Definitions of Networking Concepts](#mapping-concepts) + + + +### Network Architecture + +When migrating F5 BIG-IP LTM networking and load‑balancer configuration to NGINX Plus, it can be tempting to try translating F5 concepts and commands directly into NGINX Plus syntax. But the result is often frustration, because in several areas the two products don't align very closely in how they conceive of and handle network and application traffic. It's important to understand the differences and keep them in mind as you do your migration. + +F5 divides the network into two parts: the management network (often referred to as the _management plane_ or _control plane_) and the _application traffic network_ (the _data plane_). In a traditional architecture, the management network is isolated from the traffic network and accessible via a separate internal network, while the application network is attached to a public network (or another application network). This requires separate network configurations for each of the two kinds of traffic. + +BIG-IP LTM appliances are a dual‑proxy environment, which means that data plane traffic is also split between two different networks: the client‑side network over which client requests come into the BIG-IP LTM, and the server‑side network over which requests are sent to the application servers. BIG-IP LTM typically requires two network interface cards (NICs) to handle each part of the network. + +It is possible with a BIG-IP LTM appliance, however, to combine the client and server networks on a single NIC, combining the data plane into a single‑stack proxy architecture. This is a very typical architecture in a cloud environment where traffic comes into the BIG-IP LTM data plane and exits through the same virtual NIC. 
Regardless of networking architecture, the same basic principles for load balancing apply, and the configurations discussed below work in either architectural layout. + +NGINX Plus can function in a similar architecture either by binding multiple IP subnets (and/or VLANs) to a single NIC that is available to the host device, or by installing multiple NICs and using each for unique client and server networks, or multiple client networks and multiple server‑side networks. This is, in essence, how the BIG-IP LTM appliance functions as well, typically shipping with multiple NICs which can be trunked or bound into virtual NICs. + + +### Definitions of Networking Concepts + +Basic F5 BIG-IP LTM networking configuration requires only that you specify the IP addresses of the management and data planes, but managing more complex network environments that include BIG-IP LTM appliances involves some additional concepts. All of these concepts can be very easily simplified and mapped to NGINX Plus instances. Key BIG-IP LTM networking concepts with NGINX Plus correlates include: + +- **Self‑IP address** – The primary interface that listens to incoming client‑side data plane traffic on a specific VLAN. It is a specific IP address or subnet on a specific NIC associated with that VLAN or a VLAN group. + + In NGINX Plus, self‑IP addresses most directly map to the primary host interface used by NGINX Plus to manage traffic‑plane application data. Generally speaking, self IP addresses are not a necessary concept in an NGINX Plus deployment, as NGINX Plus utilizes the underlying OS networking for management and data‑traffic control. + +- **Management IP address:port pairs** – The IP address:port combinations on a BIG-IP LTM appliance that are used to administer it, via the GUI and/or remote SSH access. The NGINX Plus equivalent is the Linux host IP address, typically the primary host interface. 
It is possible, but not necessary, to use separate IP addresses and/or NICs for management access to the Linux host where NGINX Plus is running, if you need to separate remote access from the application traffic. 
+
+- **Virtual server** – The IP address:port combination used by BIG-IP LTM as the public destination IP address for the load‑balanced applications. This is the IP‑address portion of the virtual server that is associated with the domain name of a frontend application (for instance), and the port that's associated with the service (such as port 80 for HTTP applications). This address handles client requests and shifts from the primary device to the secondary device in the case of a failover.
+
+  Virtual servers in NGINX Plus are configured using a [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server) block. The [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen) directive in the `server` block specifies the IP address and port for client traffic.
+
+- **Pool** and **node list** – A _pool_ is a collection of backend nodes, each hosting the same application or service, across which incoming connections are load balanced. Pools are assigned to virtual servers so BIG-IP LTM knows which backend applications to use when a new request comes into a virtual server. In addition, BIG-IP LTM uses the term _node list_ to refer to an array of distinct services that all use the same traffic protocol and are hosted on the same IP address, but listen on different port numbers (for example, three HTTP services at 192.168.10.10:8100, 192.168.10.10:8200, and 192.168.10.10:8300).
+
+  NGINX Plus flattens the BIG-IP LTM pool and node list concepts by representing that information in [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) configuration blocks, which also define the load‑balancing and session‑persistence method for the virtual server that forwards traffic to the group of backend servers. 
NGINX Plus does not need the concept of node lists, because standard `upstream` block configuration very easily accommodates multiple services on the same IP address. + +In addition to these networking concepts, there are two other important technology categories to consider when migrating from BIG-IP LTM to NGINX Plus: + +- **iRules** – iRules is a proprietary, event‑driven, content‑switching and traffic‑manipulation engine (based on TCL) used by BIG-IP LTM to control all aspects of data‑plane traffic. iRules are attached to virtual servers and are required for any type of content switching, such as choosing a pool based on URI, inserting headers, establishing affinity with JSESSIONIDs, and so on. iRules are event‑driven and are configured to fire for each new connection when certain criteria are met, such as when a new HTTP request is made to a virtual server or when a server sends a response to a client. + + NGINX Plus natively handles content switching and HTTP session manipulation, eliminating the need to explicitly migrate most context‑based iRules and those which deal with HTTP transactions such as header manipulation. Most context‑based iRules can be translated to `server` and `location` blocks, and more complex iRules that cannot be duplicated with NGINX Plus directives and configuration block can be implemented with the [Lua]({{< relref "../../admin-guide/dynamic-modules/lua.md" >}}) or [JavaScript]({{< relref "../../admin-guide/dynamic-modules/nginscript.md" >}}) modules. For more information on translating iRules to NGINX Plus content rules, see [Migrating Layer 7 Logic from F5 iRules and Citrix Policies to NGINX and NGINX Plus](https://www.nginx.com/blog/migrating-layer7-logic-f5-irules-citrix-policies-nginx-plus/) on the NGINX blog. 
+ +- **High availability** – Conceptually, BIG-IP LTM and NGINX Plus handle high availability (HA) in the same way: each active‑passive pair of load balancers shares a floating "virtual" IP address (VIP) which maps to the currently active instance. If the active instance fails, the passive instance takes over and assumes the VIP. + + BIG-IP LTM uses a built‑in HA mechanism to handle the failover. + + For [on‑premises deployments]({{< relref "../../admin-guide/high-availability/ha-keepalived.md" >}}), NGINX Plus uses a separate software package called **nginx-ha-keepalived** to handle the VIP and the failover process for an active‑passive pair of NGINX Plus servers. The package implements the VRRP protocol to handle the VIP. Limited [active‑active]({{< relref "../../admin-guide/high-availability/ha-keepalived-nodes.md" >}}) scenarios are also possible with the nginx-ha-keepalived package. + + Solutions for high availability of NGINX Plus in cloud environments are also available, including these: + + - [Active‑Active HA for NGINX Plus on AWS Using AWS Network Load Balancer]({{< relref "../amazon-web-services/high-availability-network-load-balancer.md" >}}) + - [Active‑Passive HA for NGINX Plus on AWS Using Elastic IP Addresses]({{< relref "../amazon-web-services/high-availability-keepalived.md" >}}) + - [All‑Active HA for NGINX Plus on the Google Cloud Platform]({{< relref "../google-cloud-platform/high-availability-all-active.md" >}}) + + +## Converting F5 BIG-IP LTM Load-Balancer Configuration to NGINX Plus + +- [Virtual Servers](#virtual-servers) +- [SSL/TLS Offload (Termination and Proxy)](#ssl-offload) +- [Session Persistence](#session-persistence) +- [Keepalive Connections](#keepalive-connections) +- [Monitors (Health Checks)](#health-checks) + +F5 BIG-IP LTM offers three methods for configuration: + +- GUI +- CLI (the custom on‑box Traffic Management Shell [TMSH] tool) +- iControl API + +Ultimately all changes made via the GUI or API are translated to a TMSH 
CLI command, so that's the representation we're using in this guide. We assume that you are configuring the device from the `(tmos.ltm)` location, and so omit the common command variable `ltm` from all of the TMSH commands.
+
+With NGINX Plus, configuration is stored in a straightforward text file which can be accessed directly or managed using traditional on‑box tools or configuration management and orchestration tools such as [Ansible](https://www.nginx.com/blog/announcing-unified-ansible-role-nginx-nginx-plus/), [Chef](https://www.nginx.com/blog/installing-nginx-nginx-plus-chef/), and [Puppet](https://www.nginx.com/blog/installing-nginx-nginx-plus-puppet/).
+
+Although the examples in this guide use only IP addresses to identify virtual servers, with NGINX Plus both the listening IP address:port combination and the `Host` header can be used to select the appropriate `server` block to process a request. Include the [server_name](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) directive in that block to specify the values to match in the `Host` header.
+
+The list of parameters to the `server_name` directive can include multiple hostnames, wildcards, and regular expressions. You can include multiple `server_name` directives and multiple listening IP address‑port combinations within one NGINX Plus `server` block. For more information on using `Host` and the `server_name` directive instead of IP addresses, see [Server names](https://nginx.org/en/docs/http/server_names.html) at **nginx.org**.
+
+**Note:** All IP addresses and names of objects (`upstream` blocks, virtual servers, pools, and so on) in this guide are examples only. Substitute the values from your BIG-IP LTM configuration.
+
+
+### Virtual Servers
+
+As mentioned above, virtual servers are the primary listeners for both BIG-IP LTM and NGINX Plus, but the configuration syntax for defining them is quite different. 
Here, a virtual server at 192.168.10.10 listens on port 80 for HTTP traffic, and distributes incoming traffic between the two backend application servers listed in the **test_pool** upstream group. + +#### BIG-IP LTM + +```none +# create pool test_pool members add { 10.10.10.10:80 10.10.10.20:80 } +# create virtual test_virtual { destination 192.168.10.10:80 pool test_pool source-address-translation { type automap } ip-protocol tcp profiles add { http } } +# save sys config +``` + +#### NGINX Plus + +```nginx +http { + upstream test_pool { + server 10.10.10.10:80; + server 10.10.10.20:80; + } + + server { + listen 192.168.10.10:80; + location / { + proxy_pass http://test_pool; + } + #... + } + } +``` + +Directive documentation: [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen), [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [server virtual](https://nginx.org/en/docs/http/ngx_http_core_module.html#server), [server upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + + +### SSL/TLS Offload (Termination and Proxy) + +Handling SSL/TLS termination is a common use case for ADC load balancers. F5 BIG-IP LTM uses a proprietary SSL/TLS implementation. NGINX Plus relies on system libraries, so the version of OpenSSL is dictated by the OS. On BIG-IP LTM, a profile for each SSL/TLS key and certificate pair is attached to a virtual server (either as a client profile for encrypting traffic to and from the client, a server profile for encrypting backend traffic, or both). On NGINX Plus, the `ssl_certificate` and `ssl_certificate_key` directives are included in the `server` block for the virtual server. 
+ +There are two methods for handling SSL/TLS traffic on a load balancer instance, termination and proxying: + +- With SSL/TLS termination, the load balancer and client communicate in an encrypted HTTPS session, in the same way a secure application like a banking website handles client encryption with SSL/TLS certificates. After decrypting the client message (effectively terminating the secure connection), the load balancer forwards the message to the upstream server over a cleartext (unencrypted) HTTP connection. In the other direction, the load balancer encrypts the server response before sending it to the client. SSL/TLS termination is a good option if the load balancer and upstream servers are on a secured network where there's no danger of outside agents intercepting and reading the cleartext backend traffic, and where upstream application performance is paramount. + +- In the SSL/TLS proxy architecture, the load balancer still decrypts client‑side traffic as it does in the termination model, but then it re‑encrypts it before forwarding it to upstream servers. This is a good option where the server‑side network is not secure or where the upstream servers can handle the computational workload required for SSL/TLS encryption and decryption. 
+ +#### BIG-IP LTM + +- SSL/TLS Termination and Proxy: Creating SSL/TLS Virtual Server and Pool Members + + ```none + # create pool ssl_test_pool members add { 10.10.10.10:443 10.10.10.20:443 } + # create virtual test_ssl_virtual { destination 192.168.10.10:443 pool ssl_test_pool source-address-translation { type automap } ip-protocol tcp profiles add { http } } + # save /sys config + ``` + +- SSL/TLS Termination: Creating a Client SSL/TLS Profile + + ```none + # create profile client-ssl test_ssl_client_profile cert test.crt key test.key + # modify virtual test_ssl_virtual profiles add { test_ssl_client_profile } + # save /sys config + ``` + +- SSL/TLS Proxy: Creating a Server SSL/TLS Profile + + ```none + # create profile server-ssl test_ssl_server_profile cert test.crt key test.key + # modify virtual test_ssl_virtual profiles add { test_ssl_server_profile } + # save /sys config + ``` + +#### NGINX Plus + +- SSL/TLS Termination + + ```nginx + upstream ssl_test_pool { + server 10.10.10.10:443; + server 10.10.10.20:443; + } + + server { + listen 192.168.10.10:443 ssl; + ssl_certificate /etc/nginx/ssl/test.crt; + ssl_certificate_key /etc/nginx/ssl/test.key; + + location / { + proxy_pass http://ssl_test_pool; + } + } + ``` + +- SSL/TLS Proxy + + ```nginx + upstream ssl_test_pool { + server 10.10.10.10:443; + } + + server { + listen 192.168.10.10:443 ssl; + ssl_certificate /etc/nginx/ssl/test.crt; + ssl_certificate_key /etc/nginx/ssl/test.key; + + location / { + proxy_pass https://ssl_test_pool; + proxy_ssl_certificate /etc/nginx/ssl/client.pem; + proxy_ssl_certificate_key /etc/nginx/ssl/client.key; + proxy_ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + proxy_ssl_ciphers HIGH:!aNULL:!MD5; + proxy_ssl_trusted_certificate /etc/nginx/ssl/trusted_ca_cert.crt; + proxy_ssl_verify on; + proxy_ssl_verify_depth 2; + } + } + ``` + + Directive documentation: [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen), 
[location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [proxy_ssl*](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate), [server virtual](https://nginx.org/en/docs/http/ngx_http_core_module.html#server), [server upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [ssl_certificate and ssl_certificate_key](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + + +### Session Persistence + +F5 BIG-IP LTM and NGINX Plus handle session persistence (also referred to as _affinity_) in a similar way and configure it at the same level: on the upstream server (BIG-IP LTM pool or NGINX Plus `upstream` block). Both support multiple forms of persistence. Session persistence is critical for applications that are not stateless and is helpful for continuous delivery use cases. + +#### Cookie-Based Session Persistence + +One method that is simple to configure and handles failover well for NGINX Plus, if compatible with the application, is [_sticky cookie_](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky). It works just like the _cookie insert_ method in BIG-IP LTM: the load balancer creates a cookie that represents the server and the client then includes the cookie in each request, effectively offloading the session tracking from the load balancer itself. 
+ +- BIG-IP LTM: HTTP Cookie Persistence + + ```none + # create persistence cookie test_bigip_cookie cookie-name BIGIP_COOKIE_PERSIST expiration 1:0:0 + # modify virtual test_virtual { persist replace-all-with { test_bigip_cookie } } + # save /sys config + ``` + +- BIG-IP LTM: HTTPS Cookie Persistence + + ```none + # create persistence cookie test_bigip_cookie cookie-name BIGIP_COOKIE_PERSIST expiration 1:0:0 + # modify virtual test_ssl_virtual { persist replace-all-with { test_bigip_cookie } } + # save /sys config + ``` + +- NGINX Plus: HTTP Cookie Persistence + + ```nginx + upstream test_pool { + server 10.10.10.10:80; + server 10.10.10.20:80; + sticky cookie mysession expires=1h; + } + ``` + +- NGINX Plus: HTTPS Cookie Persistence + + ```nginx + upstream ssl_test_pool { + server 10.10.10.10:443; + server 10.10.10.20:443; + sticky cookie mysession expires=1h; + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [`sticky cookie`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + +#### Source IP Address-Based Session Persistence + +Another form of session persistence is based on the source IP address recorded in the request packet (the IP address of the client making the request). For each request the load balancer calculates a hash on the IP address, and sends the request to the backend server that is associated with that hash. Because the hash for a given IP address is always the same, all requests with the hash go to the same server. (For more details on the NGINX Plus implementation, see [Choosing an NGINX Plus Load Balancing Technique](https://www.nginx.com/blog/choosing-nginx-plus-load-balancing-techniques/#ip-hash) on our blog). 
+ +- BIG-IP LTM + + ```none + # modify virtual test_virtual { persist replace-all-with {source_addr} } + # save /sys config + ``` + +- NGINX Plus + + ```nginx + upstream test_pool { + ip_hash; + server 10.10.10.10:80; + server 10.10.10.20:80; + } + ``` + + Directive documentation: [ip_hash](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ip_hash), [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + +#### Token-Based Session Persistence + +Another method for session persistence takes advantage of a cookie or other token created within the session by the backend server, such as a `jsessionid`. To manage `jsessionid` creation and tracking, NGINX Plus creates a table in memory matching the cookie value with a specific backend server. + +- BIG-IP LTM + + BIG-IP LTM does not natively support a learned (or universal) persistence profile without creating a more advanced iRule, which is out of scope for this document. + +- NGINX Plus + + ```nginx + upstream test_pool { + server 10.10.10.10:80; + server 10.10.10.20:80; + sticky learn create=$upstream_cookie_jsessionid + lookup=$cookie_jsessionid + zone=client_sessions:1m; + } + ``` + + Directive documentation: [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [`sticky learn`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) + + +### Keepalive Connections + +Typically, a separate HTTP session is created and destroyed for every connection. This can be fine for short‑lived connections, like requesting a small amount of content from a web server, but it can be highly inefficient for long‑lived connections. 
Constantly creating and destroying sessions and connections can create high load for both the application server and client, slowing down page load and hurting the overall perception of the website or application's performance. HTTP keepalive connections, which instruct the load balancer to keep connections open for a session, are a necessary performance feature for web pages to load more quickly.
+
+#### BIG-IP LTM
+
+```none
+# modify virtual test_virtual profiles add { oneconnect }
+# modify virtual test_ssl_virtual profiles add { oneconnect }
+# save /sys config
+```
+
+#### NGINX Plus
+
+```nginx
+upstream test_pool {
+    server 10.10.10.10:80;
+    server 10.10.10.20:80;
+    keepalive 32;
+}
+```
+
+Directive documentation: [keepalive](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive), [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream)
+
+
+### Monitors (Health Checks)
+
+F5 BIG-IP LTM uses the term _monitor_ to refer to the process of verifying that a server is functioning correctly, while NGINX Plus uses [_health check_](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/). In a BIG-IP LTM configuration, the monitor is associated directly with a pool and applied to each node in the pool, whereas NGINX Plus places the health check in a `location` block.
+
+The `interval` argument to the following BIG-IP LTM `create` command configures BIG-IP LTM to check the server every 5 seconds, which corresponds to the default frequency for NGINX Plus. NGINX Plus does not need the BIG-IP LTM `timeout` parameter as it implements the timeout function with the `interval` and `fails` parameters.
+
+
+**Note:** This BIG-IP LTM configuration is for HTTP. For HTTPS, substitute `test_ssl_monitor` for `test_monitor` in both the `create` and `modify` commands. The same NGINX Plus configuration works for both HTTP and HTTPS. 
+
+
+#### BIG-IP LTM
+
+```none
+# create monitor http test_monitor defaults-from http send "GET /index.html HTTP/1.0\r\n\r\n" interval 5 timeout 20
+# modify pool test_pool monitor test_monitor
+# save /sys config
+```
+
+#### NGINX Plus
+
+```nginx
+upstream test_pool {
+    # ...
+    zone test_pool_zone 64k;
+}
+
+server {
+    # ...
+    location / {
+        proxy_pass http://test_pool;
+        health_check interval=5 fails=2;
+    }
+}
+```
+
+Directive documentation: [health_check](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check), [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server), [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone)
+
+
+## Summary of Converted Load Balancer Configuration
+
+
+Here we put together the configuration entities, combining everything required to build a basic F5 BIG-IP LTM environment and detail how to migrate the same configuration to NGINX Plus. 
+ +### BIG-IP LTM + +```none + # create pool test_pool members add { 10.10.10.10:80 10.10.10.20:80 } + # create virtual test_virtual { destination 192.168.10.10:80 pool test_pool source-address-translation { type automap } ip-protocol tcp profiles add { http } } + # create pool ssl_test_pool members add { 10.10.10.10:443 10.10.10.20:443 } + # create virtual test_ssl_virtual { destination 192.168.10.10:443 pool ssl_test_pool source-address-translation { type automap } ip-protocol tcp profiles add { http } } + # create profile client-ssl test_ssl_client_profile cert test.crt key test.key + # modify virtual test_ssl_virtual profiles add { test_ssl_client_profile } + # create profile server-ssl test_ssl_server_profile cert test.crt key test.key + # modify virtual test_ssl_virtual profiles add { test_ssl_server_profile } + # create persistence cookie test_bigip_cookie cookie-name BIGIP_COOKIE_PERSIST expiration 1:0:0 + # modify virtual test_virtual { persist replace-all-with { test_bigip_cookie } } + # modify virtual test_ssl_virtual { persist replace-all-with { test_bigip_cookie } } + # modify virtual test_virtual profiles add { oneconnect } + # modify virtual test_ssl_virtual profiles add { oneconnect } + # create monitor http test_monitor defaults-from http send "GET /index.html HTTP/1.0\r\n\r\n" interval 5 timeout 20 + # modify pool test_pool monitor test_monitor + # create monitor https test_ssl_monitor defaults-from https send "GET /index.html HTTP/1.0\r\n\r\n" interval 5 timeout 20 + # modify pool ssl_test_pool monitor test_ssl_monitor + # save /sys config + +``` + +### NGINX Plus + +The following configuration includes three additional directives which weren't discussed previously. Adding them is a best practice when proxying traffic: + +- The [proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) `Host $host` directive ensures the `Host` header received from the client is sent with the request to the backend server. 
+- The [proxy_http_version](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version) directive sets the HTTP version to 1.1 for the connection to the backend server. +- The `proxy_set_header Connection ""` directive clears the `Connection` header sent by the client, enabling NGINX Plus to keep encrypted keepalive connections open to the upstream servers. + +We are also enabling [live activity monitoring](https://www.nginx.com/products/nginx/live-activity-monitoring) in the final `server` block. Live activity monitoring is implemented in the [NGINX Plus API](https://nginx.org/en/docs/http/ngx_http_api_module.html) module and is exclusive to NGINX Plus. The wide range of statistics reported by the API is displayed on the built‑in dashboard and can also be exported to any application performance management (APM) or monitoring tool that can consume JSON‑formatted messages. For more detail on logging and monitoring see the [NGINX Plus Admin Guide]({{< relref "/nginx/admin-guide/monitoring/_index.md" >}}). 
+ +```nginx +upstream test_pool { + zone test_pool_zone 64k; + server 10.10.10.10:80; + server 10.10.10.20:80; + sticky cookie mysession expires=1h; + keepalive 32; +} + +upstream ssl_test_pool { + zone ssl_test_pool_zone 64k; + server 10.10.10.10:443; + server 10.10.10.20:443; + sticky cookie mysession expires=1h; + keepalive 32; +} + +server { + listen 192.168.10.10:80 default_server; + proxy_set_header Host $host; + + location / { + proxy_pass http://test_pool; + health_check; + proxy_http_version 1.1; + } + + location ~ /favicon.ico { + root /usr/share/nginx/images; + } +} + +server { + listen 192.168.10.10:443 ssl default_server; + ssl_certificate test.crt; + ssl_certificate_key test.key; + proxy_set_header Host $host; + + location / { + proxy_pass https://ssl_test_pool; + proxy_http_version 1.1; + proxy_set_header Connection ""; + health_check; + } + + location ~ /favicon.ico { + root /usr/share/nginx/images; + } +} + +server { + listen 8080; + status_zone status-page; + root /usr/share/nginx/html; + + location /api { + api write=on; + # directives controlling access, such as 'allow' and 'deny' + } + + location = /dashboard.html { + root /usr/share/nginx/html; + } + + # Redirect requests made to the old (pre-R14) dashboard + location = /status.html { + return 301 /dashboard.html; + } + + location ~ /favicon.ico { + root /usr/share/nginx/images; + } +} +``` + +### Revision History + +- Version 2 (April 2018) – Updated information about high availability and the NGINX Plus API (NGINX Plus R13, NGINX Open Source 1.13.4) +- Version 1 (February 2017) – Initial version (NGINX Plus R11, NGINX Open Source 1.11.5) diff --git a/content/nginx/deployment-guides/nginx-plus-high-availability-chef.md b/content/nginx/deployment-guides/nginx-plus-high-availability-chef.md new file mode 100644 index 000000000..20497ba71 --- /dev/null +++ b/content/nginx/deployment-guides/nginx-plus-high-availability-chef.md @@ -0,0 +1,814 @@ +--- +description: Step-by-step instructions for 
using Chef to automate the setup and maintenance + of an active-passive high availability cluster of F5 NGINX Plus instances. +docs: DOCS-461 +doctypes: +- task +draft: true +title: NGINX Plus High Availability Chef +toc: true +weight: 100 +--- + +In a [previous blog post](https://www.nginx.com/blog/installing-nginx-nginx-plus-chef/), we went over how to deploy F5 NGINX Plus using Chef. In this blog post we will build on the Chef configs from the last blog post and use Chef to deploy a highly available (HA) NGINX Plus active/passive cluster. + +This post assumes that you have already run through the previous post and have a working Chef installation. If not, please review at least the first two sections in the previous post, [Preparing Your Chef Environment](https://www.nginx.com/blog/installing-nginx-nginx-plus-chef/#prepare-environment) and [Downloading and Configuring the NGINX Cookbook](https://www.nginx.com/blog/installing-nginx-nginx-plus-chef/#download-cookbook). + +_Editor – In addition to the previous blog on [Installing NGINX and NGINX Plus with Chef](https://www.nginx.com/blog/installing-nginx-nginx-plus-chef/), check out these related blogs about other DevOps automation tools with NGINX and NGINX Plus:_ + +_ + +- [Installing NGINX and NGINX Plus with Ansible](https://www.nginx.com/blog/installing-nginx-nginx-plus-ansible/) +- [Installing NGINX and NGINX Plus with Puppet](https://www.nginx.com/blog/installing-nginx-nginx-plus-puppet/) + +_ + +To set up the highly available active/passive cluster, we’re using the [HA solution]({{< relref "../admin-guide/high-availability/ha-keepalived.md" >}}) provided by NGINX, Inc., which is based on [keepalived](http://www.keepalived.org) and the [Virtual Router Redundancy Protocol](https://tools.ietf.org/html/rfc2338) (VRRP). The solution comes with an interactive script for creating the `keepalived` configuration file, but here we’re using Chef to automate the complete setup process. 
+ +## Modifying the NGINX Cookbook + +First we set up the Chef files for installing the NGINX Plus HA package (nginx-ha-keepalived) and creating the `keepalived` configuration file, **keepalived.conf**. + +1. Modify the existing **plus_package** recipe to include package and configuration templates for the HA solution, by adding the following code to the bottom of the **plus_package.rb** file (per the instructions in the previous post, the file is in the ~/chef-zero/playground/cookbooks/nginx/recipes directory). + + We are using the **eth1** interface on each NGINX host, which makes the code a bit more complicated than if we used **eth0**. In case you are using **eth0**, the relevant code appears near the top of the file, commented out. + + This code does three things: + + - It looks up the IP address of the **eth1** interface on the node where NGINX Plus is being installed, and assigns the value to the `origip` variable so it can be passed to the template. + - It finds the other node in the HA pair by using Chef’s `search` function to iterate through all Chef nodes, then looks up the IP address for that node’s **eth1** interface and assigns the address to the `ha_pair_ips` variable. + - It installs the nginx-ha-keepalived package, registers the `keepalived` service with Chef, and generates the **keepalived.conf** configuration file as a template, passing in the values of the `origip` and `ha_pair_ips` variables. + + ```nginx + if node['nginx']['enable_ha_mode'] == 'true' + + ha_pair_ips = Array.new + + origip = "#{node[:network][:interfaces][:eth1][:addresses].detect{|k,v| v[:family] == 'inet'}.first}" + + # The code for finding the IP address of the eth0 interface + + # follows, commented out.
+ + #origip = "#{node[:ipaddress]}" + + #search(:node, "role:nginx_plus_ha") do |nodes| + + # ha_pair_ips << nodes["ipaddress"] + #end + # This is a workaround for getting the IP address for the eth1 + # that VMs need + search(:node, "role:nginx_plus_ha AND enable_ha_mode:true NOT name:#{node.name}") do |nodes| + nodes["network"]["interfaces"]["eth1"]["addresses"].each_pair do |address,value| + ha_pair_ips << address if value.has_key?("broadcast") + end + end + + package 'nginx-ha-keepalived' do + action :install + end + + service 'keepalived' do + supports :status => true, :restart => true, :reload => true + + action :enable + + end + + template '/etc/keepalived/keepalived.conf' do + + source 'nginx_plus_keepalived.conf.erb' + + owner 'root' + + group node['root_group'] + + mode '0644' + + variables( + + :myip => origip, + + :ha_pair_ip => ha_pair_ips + + ) + + notifies :reload, 'service[keepalived]', :delayed + + end + + end + ``` + + You can download the [full recipe file](https://www.nginx.com/resource/conf/plus_package.rb-chef-recipe) from the NGINX, Inc. website. + +2. Create the Chef template for creating **keepalived.conf**, by copying the following content to a new template file, **nginx_plus_keepalived.conf.erb**, in the ~/chef-zero/playground/cookbooks/nginx/templates/default directory. + + We’re using a combination of variables and attributes to pass the necessary information to **keepalived.conf**. We’ll set the attributes in the next step. Here we set the two variables in the template file to the host IP addresses that were set with the `variables` directive in the **plus_package.rb** recipe (modified in the previous step): + + - `myip` – The primary IP address used by `keepalived` to communicate with the other highly available nodes. Corresponds to the `origip` variable in the **plus_package** recipe. 
+ - `ha_pair_ip` – An array containing the IP address of each host that has the **nginx_plus_ha** role in its run list; it is used to set the IP address of the peer (secondary) host in **keepalived.conf**. Corresponds to the `ha_pair_ips` array in the **plus_package** recipe. + + ```nginx + vrrp_script chk_nginx_service { + + script "/usr/lib/keepalived/nginx-ha-check" + + interval 3 + + weight 50 + + } + + vrrp_instance VI_1 { + + interface eth1 + + <% if node['nginx']['ha_primary'] == "true" %> + + state MASTER + + priority 151 + + <% end %> + + <% if node['nginx']['ha_primary'] == "false" %> + + state BACKUP + + priority 150 + + <% end %> + + virtual_router_id 51 + + advert_int 1 + + unicast_src_ip <%= @myip %> + + unicast_peer { + + <% @ha_pair_ip.each do |ip| %> + + <% if ip != @myip %> + + <%= ip %> + + <% end %> + + <% end %> + + } + + authentication { + + auth_type PASS + + auth_pass <%= node['nginx']['ha_keepalived_key'] %> + + } + + virtual_ipaddress { + + <%= node['nginx']['ha_vip'] %> + + } + + track_script { + + chk_nginx_service + + } + + notify "/usr/lib/keepalived/nginx-ha-notify" + + } + + ``` + +3. Create a role that sets attributes used in the recipe and template files created in the previous steps, by copying the following contents to a new role file, **nginx_plus_ha.rb** in the ~/chef-zero/playground/roles directory. + + Four attributes need to be set, and in the role we set the following three: + + - `[nginx][ha_keepalived_key]` – The authentication key used by `keepalived` to encrypt communication with the other highly available nodes. Used in the template file. + - `[nginx][ha_vip]` – The virtual IP address (VIP) advertised to clients as the address for NGINX Plus. The `keepalived` process assigns it to the primary NGINX Plus instance, and is responsible for transferring it to the other instance in case of failover. Used in the template file.
+ - `[nginx][enable_ha_mode]` – Triggers the HA section of the **plus_package** recipe when set to `true`. Used in the recipe file. + + We don’t set the fourth attribute, `ha_primary`, in the role because it must be set on a per‑host basis. It is used in the template file to set the `state` and `priority` values in **keepalived.conf**; those values in turn determine which NGINX Plus instance is marked as primary. If `ha_primary` is `true` for a node, it becomes the primary NGINX Plus instance in the HA pair; it becomes the secondary instance if the value is `false`. + + It is also possible to set the primary instance based on hostname matching instead of the `ha_primary` attribute, as we’ll cover in [Setting the Primary Node Based on Hostname Matching](#hostname-match). Also, you can set all four attributes on a per‑host basis, but I prefer to keep as many of the shared attributes in a single location as possible. + + The `run_list` directive in the role references the **nginx_plus** role we created in the [previous blog post](https://www.nginx.com/blog/installing-nginx-nginx-plus-chef/#download-cookbook), to save us from having to duplicate everything already defined in that role. + + ```nginx + name "nginx_plus_ha" + description "An example role to install NGINX Plus in an HA cluster" + run_list "role[nginx_plus]" + default_attributes "nginx" => { "enable_ha_mode" => "true", + "ha_keepalived_key" => "a0cf476cf069ea3dfa8940ff6d6bd885", + "ha_vip" => "10.100.10.50" + } + ``` + +## Preparing Nodes for Installation + +Now we bootstrap the nodes and get them ready for the installation. Note that the longer outputs have been truncated, leaving only the most important output. + +### Setting Up the First Node + +1. Upload all of the files and bootstrap the first node: + + `root@chef-server:~# cd chef-zero/playground/cookbooks/ + + root@chef-server:~/chef-zero/playground/cookbooks# knife cookbook upload * + + Uploading apache2 [1.0.0] + + Uploading apt [2.8.2] + + ... 
+ + Uploading nginx [2.7.6] + + ... + + Uploading yum [3.8.1] + + Uploading yum-epel [0.6.3] + + Uploaded 18 cookbooks. + + root@chef-server:~/chef-zero/playground/cookbooks# cd ../ + + root@chef-server:~/chef-zero/playground# knife role from file roles/nginx_plus.rb + + Updated Role nginx_plus! + + root@chef-server:~/chef-zero/playground# knife role from file roles/nginx_plus_ha.rb + + Updated Role nginx_plus_ha! + + root@chef-server:~/chef-zero/playground# knife bootstrap -N chef-test-1 -x username --sudo 10.100.10.100 + + Creating new client for chef-test-1 + + Creating new node for chef-test-1 + + Connecting to 10.100.10.100 + + username@10.100.10.100's password: + + 10.100.10.100 knife sudo password: + + Enter your password: + + 10.100.10.100 + + 10.100.10.100 -----> Existing Chef installation detected + + 10.100.10.100 Starting first Chef Client run... + + 10.100.10.100 Starting Chef Client, version 12.6.0 + + 10.100.10.100 resolving cookbooks for run list: [] + + 10.100.10.100 Synchronizing Cookbooks: + + 10.100.10.100 Compiling Cookbooks... + + 10.100.10.100 [2016-02-07T06:17:13-08:00] WARN: Node chef-test-1 has an empty run list. + + 10.100.10.100 Converging 0 resources + + 10.100.10.100 + + 10.100.10.100 Running handlers: + + 10.100.10.100 Running handlers complete + + 10.100.10.100 Chef Client finished, 0/0 resources updated in 01 seconds + + ` + +2. Create a local copy of the node definition file, which we’ll edit as appropriate for the node we bootstrapped in the previous step, chef-test-1: + + ```nginx + root@chef-server:~/chef-zero/playground# knife node show chef-test-1 --format json > nodes/chef-test-1.json + ``` + +3. Edit chef-test-1.json to have the following contents. In particular, we’re updating the run list and setting the `ha_primary` attribute, as required for the HA deployment. 
+ + ```json + { + "name": "chef-test-1", + "chef_environment": "_default", + "run_list": [ + "role[nginx_plus_ha]" + ] + , + "normal": { + "nginx": { + "ha_primary": "true" + }, + "tags": [ + ] + } + } + ``` + +4. Push the changed node definition to Chef: + + ```nginx + root@chef-server:~/chef-zero/playground# knife node from file nodes/chef-test-1.json + Updated Node chef-test-1! + ``` + +5. Log in on the chef-test-1 node and run the `chef-client` command to get everything configured: + + ```text + username@chef-test-1:~$ sudo chef-client + + Starting Chef Client, version 12.6.0 + + resolving cookbooks for run list: ["nginx"] + + Synchronizing Cookbooks: + + - ohai (2.0.1) + + - build-essential (2.2.4) + + - nginx (2.7.6) + + - yum-epel (0.6.3) + + - apt (2.8.2) + + - bluepill (2.4.0) + + - runit (1.7.2) + + - rsyslog (2.1.0) + + - packagecloud (0.1.0) + + - yum (3.8.1) + + Compiling Cookbooks... + + ... + + * template[/etc/keepalived/keepalived.conf] action create + + - create new file /etc/keepalived/keepalived.conf + + - update content in file /etc/keepalived/keepalived.conf from none to 9816fd + + --- /etc/keepalived/keepalived.conf 2016-02-07 06:34:18.117013010 -0800 + + +++ /etc/keepalived/.keepalived.conf20160207-1079-sjg8xn 2016-02-07 06:34:18.117013010 -0800 + + @@ -1 +1,28 @@ + + +vrrp_script chk_nginx_service { + + + script "/usr/lib/keepalived/nginx-ha-check" + + + interval 3 + + + weight 50 + + +} + + + + + +vrrp_instance VI_1 { + + + interface eth1 + + + state MASTER + + + priority 151 + + + virtual_router_id 51 + + + advert_int 1 + + + unicast_src_ip 10.100.10.100 + + + unicast_peer { + + + } + + + authentication { + + + auth_type PASS + + + auth_pass a0cf476cf069ea3dfa8940ff6d6bd885 + + + } + + + virtual_ipaddress { + + + 10.100.10.50 + + + } + + + track_script { + + + chk_nginx_service + + + } + + + notify "/usr/lib/keepalived/nginx-ha-notify" + + +} + + - change mode from '' to '0644' + + - change owner from '' to 'root' + + - change group from 
'' to 'root' + + Recipe: nginx::default + + * service[nginx] action start (up to date) + + * service[nginx] action reload + + - reload service service[nginx] + + Recipe: nginx::plus_package + + * service[keepalived] action reload (up to date) + + Running handlers: + + Running handlers complete + + Chef Client finished, 18/50 resources updated in 07 seconds + + ``` + +If we look at **keepalived.conf** at this point, we see the template hasn’t set any values in the `unicast_peer` section, because we’ve registered just this one node with Chef. The following command shows that the VIP specified in the role file, 10.100.10.50, is assigned to **eth1** on this node, making it the primary HA node: + + username@chef-test-1:~$ ip addr show eth1 + 3: eth1: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 + link/ether 08:00:27:c8:66:ee brd ff:ff:ff:ff:ff:ff + inet 10.100.10.100/24 brd 10.100.10.255 scope global eth1 + valid_lft forever preferred_lft forever + inet 10.100.10.50/32 scope global eth1 + valid_lft forever preferred_lft forever + inet6 fe80::a00:27ff:fec8:66ee/64 scope link + valid_lft forever preferred_lft forever + +### Setting Up the Second Node + +Now, let’s get the second node bootstrapped and make sure all of the values are being propagated properly. We know from the first node that the run list works as expected, so we can combine some steps and set the run list and the `ha_primary` attribute directly with the `knife` `bootstrap` command, which we run on the Chef server. 
+ +`root@chef-server:~/chef-zero/playground# knife bootstrap -N chef-test-2 -x username --json-attributes "{\"nginx\": {\"ha_primary\": \"false\"}}" --sudo --run-list "role[nginx_plus_ha]" 10.100.10.102 + +Creating new client for chef-test-2 + +Creating new node for chef-test-2 + +Connecting to 10.100.10.102 + +username@10.100.10.102's password: + +10.100.10.102 sudo: unable to resolve host chef-test + +10.100.10.102 knife sudo password: + +Enter your password: + +10.100.10.102 + +10.100.10.102 -----> Existing Chef installation detected + +10.100.10.102 Starting first Chef Client run... + +10.100.10.102 Starting Chef Client, version 12.6.0 + +10.100.10.102 resolving cookbooks for run list: ["nginx"] + +10.100.10.102 Synchronizing Cookbooks: + +10.100.10.102 - bluepill (2.4.0) + +10.100.10.102 - apt (2.8.2) + +10.100.10.102 - build-essential (2.2.4) + +10.100.10.102 - ohai (2.0.1) + +10.100.10.102 - nginx (2.7.6) + +10.100.10.102 - runit (1.7.2) + +10.100.10.102 - yum-epel (0.6.3) + +10.100.10.102 - rsyslog (2.1.0) + +10.100.10.102 - packagecloud (0.1.0) + +10.100.10.102 - yum (3.8.1) + +10.100.10.102 Compiling Cookbooks... + +... 
+ +10.100.10.102 * service[keepalived] action enable (up to date) + +10.100.10.102 * template[/etc/keepalived/keepalived.conf] action create + +10.100.10.102 - create new file /etc/keepalived/keepalived.conf + +10.100.10.102 - update content in file /etc/keepalived/keepalived.conf from none to a9363c + +10.100.10.102 --- /etc/keepalived/keepalived.conf 2016-02-07 06:45:10.529976825 -0800 + +10.100.10.102 +++ /etc/keepalived/.keepalived.conf20160207-11317-1env6hu 2016-02-07 06:45:10.529976825 -0800 + +10.100.10.102 @@ -1 +1,29 @@ + +10.100.10.102 +vrrp_script chk_nginx_service { + +10.100.10.102 + script "/usr/lib/keepalived/nginx-ha-check" + +10.100.10.102 + interval 3 + +10.100.10.102 + weight 50 + +10.100.10.102 +} + +10.100.10.102 + + +10.100.10.102 +vrrp_instance VI_1 { + +10.100.10.102 + interface eth1 + +10.100.10.102 + state BACKUP + +10.100.10.102 + priority 150 + +10.100.10.102 + virtual_router_id 51 + +10.100.10.102 + advert_int 1 + +10.100.10.102 + unicast_src_ip 10.100.10.102 + +10.100.10.102 + unicast_peer { + +10.100.10.102 + 10.100.10.100 + +10.100.10.102 + } + +10.100.10.102 + authentication { + +10.100.10.102 + auth_type PASS + +10.100.10.102 + auth_pass a0cf476cf069ea3dfa8940ff6d6bd885 + +10.100.10.102 + } + +10.100.10.102 + virtual_ipaddress { + +10.100.10.102 + 10.100.10.50 + +10.100.10.102 + } + +10.100.10.102 + track_script { + +10.100.10.102 + chk_nginx_service + +10.100.10.102 + } + +10.100.10.102 + notify "/usr/lib/keepalived/nginx-ha-notify" + +10.100.10.102 +} + +10.100.10.102 - change mode from '' to '0644' + +10.100.10.102 - change owner from '' to 'root' + +10.100.10.102 - change group from '' to 'root' + +10.100.10.102 Recipe: nginx::default + +10.100.10.102 * service[nginx] action start (up to date) + +10.100.10.102 * service[nginx] action reload + +10.100.10.102 - reload service service[nginx] + +10.100.10.102 Recipe: nginx::plus_package + +10.100.10.102 * service[keepalived] action reload + +10.100.10.102 - reload service 
service[keepalived] + +10.100.10.102 + +10.100.10.102 Running handlers: + +10.100.10.102 Running handlers complete + +10.100.10.102 Chef Client finished, 18/50 resources updated in 10 seconds` + +If we look at **keepalived.conf** at this point, we see that there is a peer set in the `unicast_peer` section. But the following command shows that chef-test-2, which we intend to be the secondary node, is also assigned the VIP (10.100.10.50). This is because we haven’t yet updated the Chef configuration on chef-test-1 to make its `keepalived` aware of the secondary node. + + username@chef-test-2:~$ ip addr show eth1 + 3: eth1: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 + link/ether 08:00:27:6d:d9:64 brd ff:ff:ff:ff:ff:ff + inet 10.100.10.102/24 brd 10.100.10.255 scope global eth1 + valid_lft forever preferred_lft forever + inet 10.100.10.50/32 scope global eth1 + valid_lft forever preferred_lft forever + inet6 fe80::a00:27ff:fe6d:d964/64 scope link + valid_lft forever preferred_lft forever + +### Synchronizing the Nodes + +To make `keepalived` on chef-test-1 aware of chef-test-2 and its IP address, we rerun the `chef-client` command on chef-test-1: + +```text +username@chef-test-1:~$ sudo chef-client + +Starting Chef Client, version 12.6.0 + +resolving cookbooks for run list: ["nginx"] + +Synchronizing Cookbooks: + +- ohai (2.0.1) + +- build-essential (2.2.4) + +- nginx (2.7.6) + +- yum-epel (0.6.3) + +- apt (2.8.2) + +- bluepill (2.4.0) + +- runit (1.7.2) + +- rsyslog (2.1.0) + +- packagecloud (0.1.0) + +- yum (3.8.1) + +Compiling Cookbooks... + +... + +username@chef-test-1:~$ sudo chef-client + +Starting Chef Client, version 12.6.0 + +resolving cookbooks for run list: ["nginx"] + +Synchronizing Cookbooks: + + - ohai (2.0.1) + + - build-essential (2.2.4) + + - nginx (2.7.6) + + - yum-epel (0.6.3) + + - apt (2.8.2) + + - bluepill (2.4.0) + + - runit (1.7.2) + + - rsyslog (2.1.0) + + - packagecloud (0.1.0) + + - yum (3.8.1) + +Compiling Cookbooks... + +... 
+ + * template[/etc/keepalived/keepalived.conf] action create + + - update content in file /etc/keepalived/keepalived.conf from 9816fd to af7ce0 + + --- /etc/keepalived/keepalived.conf 2016-02-07 06:34:18.117013010 -0800 + + +++ /etc/keepalived/.keepalived.conf20160207-3369-73qgm3 2016-02-07 06:53:04.593013010 -0800 + + @@ -12,6 +12,7 @@ + + advert_int 1 + + unicast_src_ip 10.100.10.100 + + unicast_peer { + + + 10.100.10.102 + + } + + authentication { + + auth_type PASS + +Recipe: nginx::default + + * service[nginx] action start (up to date) + + * service[nginx] action reload + + - reload service service[nginx] + +Recipe: nginx::plus_package + + * service[keepalived] action reload (up to date) + +Running handlers: + +Running handlers complete + +Chef Client finished, 2/47 resources updated in 05 seconds + +``` + +We see that chef-test-1 is still assigned the VIP: + + ```nginx + username@chef-test-1:~$ ip addr show eth1 + 3: eth1: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 + link/ether 08:00:27:c8:66:ee brd ff:ff:ff:ff:ff:ff + inet 10.100.10.100/24 brd 10.100.10.255 scope global eth1 + valid_lft forever preferred_lft forever + inet 10.100.10.50/32 scope global eth1 + valid_lft forever preferred_lft forever + inet6 fe80::a00:27ff:fec8:66ee/64 scope link + valid_lft forever preferred_lft forever + ``` + +And chef-test-2, as the secondary node, is now assigned only its physical IP address: + + ```nginx + username@chef-test-2:~$ ip addr show eth1 + 3: eth1: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 + link/ether 08:00:27:6d:d9:64 brd ff:ff:ff:ff:ff:ff + inet 10.100.10.102/24 brd 10.100.10.255 scope global eth1 + valid_lft forever preferred_lft forever + inet6 fe80::a00:27ff:fe6d:d964/64 scope link + valid_lft forever preferred_lft forever + ``` + +If you want to test the `keepalived` failover at this point, stop the NGINX Plus service on the primary node. 
+ +## Setting the Primary Node Based on Hostname Matching + +You can automate your HA setup even further if you indicate a host’s usual role (primary or secondary) in its hostname. Then, instead of setting the `ha_primary` attribute in the node definition file (as in Step 3 of [Setting Up the First Node](#first-node)), you can have Chef set the attribute based on the hostname. This takes advantage of Chef’s flexibility in allowing you to set variables inside of recipes, which you do by using slightly modified Ruby code to inspect any of the attributes and membership information the Chef server is aware of. One example is Step 1 of [Modifying the NGINX Cookbook](#modify-cookbook), where we added code to the **plus_package** recipe to look up **eth1**‘s IP address. + +Here we extend that code further by adding an `if` statement that inspects the hostname and sets `ha_primary` appropriately. It relies on the presence of the word **primary** or **standby** in the hostname. Put it after the code in **plus_package.rb** that assigns a value to the **origip** variable (the complete code appears in Step 1 of [Modifying the NGINX Cookbook](#modify-cookbook)): + + ```nginx + # ... + origip = "#{node[:network][:interfaces][:eth1][:addresses].detect{|k,v| v[:family] == 'inet'}.first}" + search(:node, "role:nginx_plus_ha AND enable_ha_mode:#{node.nginx.enable_ha_mode} NOT name:#{node.name}") do |nodes| + nodes["network"]["interfaces"]["eth1"]["addresses"].each_pair do |address,value| + ha_pair_ips << address if value.has_key?("broadcast") + end + end + + if node['name'].include? "primary" + ha_primary = "true" + elsif node['name'].include? "standby" + ha_primary = "false" + end + package 'nginx-ha-keepalived' do + action :install + end + # ... + ``` + +Of course you can match on terms in the hostname other than **primary** and **standby**, such as **lb1** and **lb2**. 
You can also put this logic in the **nginx_plus_keepalived.conf.erb** template file instead, but that is less portable. + +## Summary + +We have covered some more complex examples of what is possible using the Chef `search` function, as well as Ruby code in the cookbook recipes. It is possible to apply these techniques in many ways, using attributes and the `search` function to build very portable and flexible Chef configurations. We will cover some of these concepts in future blog posts. + +_Editor – For more about using DevOps automation tools with NGINX and NGINX Plus, check out these related blogs:_ + +_ + +- [Installing NGINX and NGINX Plus with Ansible](https://www.nginx.com/blog/installing-nginx-nginx-plus-ansible/) +- [Installing NGINX and NGINX Plus with Chef](https://www.nginx.com/blog/installing-nginx-nginx-plus-chef/) +- [Installing NGINX and NGINX Plus with Puppet](https://www.nginx.com/blog/installing-nginx-nginx-plus-puppet/) + +_ + +Try out the NGINX Plus HA solution with Chef for yourself – start your [free 30-day trial](https://www.nginx.com/free-trial-request/) today or [contact us](https://www.nginx.com/contact-sales/) for a live demo. diff --git a/content/nginx/deployment-guides/setting-up-nginx-demo-environment.md b/content/nginx/deployment-guides/setting-up-nginx-demo-environment.md new file mode 100644 index 000000000..1c3ced59f --- /dev/null +++ b/content/nginx/deployment-guides/setting-up-nginx-demo-environment.md @@ -0,0 +1,278 @@ +--- +description: Configure NGINX Open Source as a web server and F5 NGINX Plus as a load + balancer, as required for the sample deployments in NGINX deployment guides. +docs: DOCS-462 +doctypes: +- task +title: Setting Up an NGINX Demo Environment +toc: true +weight: 100 +--- + +The instructions in this guide explain how to set up a simple demo environment that uses F5 NGINX Plus to load balance web servers that run NGINX Open Source and serve two distinct web applications. 
It is referenced by some of our deployment guides for implementing high availability of NGINX Plus and NGINX Open Source in cloud environments. + + +## Prerequisites + +This guide assumes you have already provisioned a number of host systems (physical servers, virtual machines, containers, or cloud instances) required for a deployment guide (if applicable) and installed NGINX Open Source or NGINX Plus on each instance as appropriate. For installation instructions, see the [NGINX Plus Admin Guide]({{< relref "/nginx/admin-guide/installing-nginx/_index.md" >}}). + +Some commands require `root` privilege. If appropriate for your environment, prefix commands with the `sudo` command. + + +## Configuring NGINX Open Source for Web Serving + +The steps in this section configure an NGINX Open Source instance as a web server to return a page like the following, which specifies the server name, address, and other information. The page is defined in the demo-index.html configuration file you create in Step 4 below. + + + +If you are using these instructions to satisfy the prerequisites for one of our deployment guides, the Appendix in the guide specifies the name of each NGINX Open Source instance and whether to configure **App 1** or **App 2**. + +**Note:** Some commands require `root` privilege. If appropriate for your environment, prefix commands with the `sudo` command. + +1. Open a connection to the NGINX Open Source instance and change the directory to **/etc/nginx/conf.d**: + + ```shell + cd /etc/nginx/conf.d + ``` + +2. Rename **default.conf** to **default.conf.bak** so that NGINX Plus does not use it. + + ```shell + mv default.conf default.conf.bak + ``` + +3. Create a new file called **app.conf** with the following contents.
+ + ```nginx + server { + listen 80 default_server; + server_name app_server; + + root /usr/share/nginx/html; + error_log /var/log/nginx/app-server-error.log notice; + index demo-index.html index.html; + expires -1; + + sub_filter_once off; + sub_filter 'server_hostname' '$hostname'; + sub_filter 'server_address' '$server_addr:$server_port'; + sub_filter 'server_url' '$request_uri'; + sub_filter 'remote_addr' '$remote_addr:$remote_port'; + sub_filter 'server_date' '$time_local'; + sub_filter 'client_browser' '$http_user_agent'; + sub_filter 'request_id' '$request_id'; + sub_filter 'nginx_version' '$nginx_version'; + sub_filter 'document_root' '$document_root'; + sub_filter 'proxied_for_ip' '$http_x_forwarded_for'; + } + ``` + + Directive documentation: [error_log](http://nginx.org/en/docs/ngx_core_module.html#error_log), [expires](http://nginx.org/en/docs/http/ngx_http_headers_module.html#expires), [index](http://nginx.org/en/docs/http/ngx_http_index_module.html#index), [listen](http://nginx.org/en/docs/http/ngx_http_core_module.html#listen), [root](http://nginx.org/en/docs/http/ngx_http_core_module.html#root), [server](http://nginx.org/en/docs/http/ngx_http_core_module.html#server), [server_name](http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name), [sub_filter](http://nginx.org/en/docs/http/ngx_http_sub_module.html#sub_filter) + +4. Include the following directive in the top‑level ("main") context in **/etc/nginx/nginx.conf**, if it does not already appear there. + + ```nginx + include conf.d/*.conf; + ``` + + Directive documentation: [include](http://nginx.org/en/docs/ngx_core_module.html#include) + +5. In the **/usr/share/nginx/html** directory, create a new file called **demo-index.html** with the following contents, which define the default web page that appears when users access the instance. + + In the `` tag, replace the comment with `1` or `2` depending on whether the instance is serving **App 1** or **App 2**. 
+ + ```html + <!DOCTYPE html> + <html> + <head> + <title>Hello World - App X <!-- Replace 'X' with '1' or '2' as appropriate --> + + + + + + + + NGINX Logo +
      +

      Server name: server_hostname

      +

      Server address: server_address

      +

      User Agent: client_browser

      +

      URI: server_url

      +

      Doc Root: document_root

      +

      Date: server_date

      +

      NGINX Frontend Load Balancer IP: remote_addr

      +

      Client IP: proxied_for_ip

      +

      NGINX Version: nginx_version

      +
      +
      + Auto Refresh +
      + + + + ``` + + +## Configuring NGINX Plus for Load Balancing + +The steps in this section configure an NGINX Plus instance to load balance requests across the group of NGINX Open Source web servers you configured in the [previous section](#nginx-oss). + +If you are using these instructions to satisfy the prerequisites for one of our deployment guides, the Appendix in the guide specifies the names of the NGINX Plus instances used in it. + +Repeat these instructions on each instance. Alternatively, you can configure one instance and share the configuration with its peers in a cluster. See the [NGINX Plus Admin Guide]({{< relref "../admin-guide/high-availability/configuration-sharing.md" >}}). + +1. Open a connection to the NGINX Plus instance and change the directory to **/etc/nginx/conf.d**: + + ```shell + cd /etc/nginx/conf.d + ``` + +2. Rename **default.conf** to **default.conf.bak** so that NGINX Plus does not use it. + + ```shell + mv default.conf default.conf.bak + ``` + +3. Create a new file called **lb.conf** with the following contents. + + **Note:** In the `upstream` blocks, include a [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server) directive for each NGINX Open Source instance that serves the relevant application.
+ + ```nginx + # in the 'http' context + upstream app1 { + server ; + # 'server' directives for additional App 1 servers, if using + zone app1 64k; + } + + upstream app2 { + server ; + # 'server' directives for additional App 2 servers, if using + zone app2 64k; + } + + server { + listen 80; + status_zone backend; + root /usr/share/nginx/html; + + location / { + # directives for serving the site's HTML landing page + } + + location /application1 { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_pass http://app1/; + } + + location /application2 { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_pass http://app2/; + } + + location /api { + api write=on; + # directives to control access, such as 'allow' and 'deny' + } + + location = /dashboard.html { + root /usr/share/nginx/html; + } + + location = /status.html { # redirect requests that are made to pre-R14 dashboard + return 301 /dashboard.html; + } + } + ``` + + Directive documentation: [api](https://nginx.org/en/docs/http/ngx_http_api_module.html#api), [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen), [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location), [proxy_pass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass), [proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header), [return](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#return), [root](https://nginx.org/en/docs/http/ngx_http_core_module.html#root), [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server) (upstream),[server](https://nginx.org/en/docs/http/ngx_http_core_module.html#server) (virtual), [server_name](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_name), [status_zone](https://nginx.org/en/docs/http/ngx_http_status_module.html#status_zone), 
[upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream), [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) + +4. Include the following directive in the top‑level ("main") context in **/etc/nginx/nginx.conf**, if it does not already appear there. + + ```nginx + include conf.d/*.conf; + ``` + + Directive documentation: [include](http://nginx.org/en/docs/ngx_core_module.html#include) + +### Revision History + +- Version 2 (April 2019) – Generalized instructions for use with deployment guides +- Version 1 (April 2018) – Initial version diff --git a/content/nginx/deployment-guides/single-sign-on/_index.md b/content/nginx/deployment-guides/single-sign-on/_index.md new file mode 100644 index 000000000..3c84b90c7 --- /dev/null +++ b/content/nginx/deployment-guides/single-sign-on/_index.md @@ -0,0 +1,9 @@ +--- +description: Learn how to use OpenID Connect (OIDC) Provider Servers and Services + to enable single sign-on for applications proxied by F5 NGINX Plus. +menu: + docs: + parent: NGINX Plus +title: Set Up Single Sign-On for Proxied Applications +weight: 100 +--- diff --git a/content/nginx/deployment-guides/single-sign-on/active-directory-federation-services.md b/content/nginx/deployment-guides/single-sign-on/active-directory-federation-services.md new file mode 100644 index 000000000..e98666c82 --- /dev/null +++ b/content/nginx/deployment-guides/single-sign-on/active-directory-federation-services.md @@ -0,0 +1,153 @@ +--- +description: Enable OpenID Connect-based single sign-on for applications proxied by NGINX + Plus, using Microsoft AD FS as the identity provider (IdP). +docs: DOCS-463 +doctypes: +- task +title: Single Sign-On with Microsoft Active Directory FS +toc: true +weight: 100 +--- + +This guide explains how to enable single sign-on (SSO) for applications being proxied by F5 NGINX Plus. 
The solution uses OpenID Connect as the authentication mechanism, with [Microsoft Active Directory Federation Services](https://docs.microsoft.com/en-us/windows-server/identity/active-directory-federation-services) (AD FS) as the identity provider (IdP) and NGINX Plus as the relying party. + +{{< see-also >}}{{< readfile file="includes/nginx-openid-repo-note.txt" markdown="true" >}}{{< /see-also >}} + + +## Prerequisites + +The instructions assume you have the following: + +- A running deployment of AD FS, either on‑premises or in Azure. +- An NGINX Plus subscription and NGINX Plus R15 or later. For installation instructions, see the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-plus/). +- The [NGINX JavaScript module](https://www.nginx.com/blog/introduction-nginscript/) (njs), required for handling the interaction between NGINX Plus and the IdP. After installing NGINX Plus, install the module with the command for your operating system. + + For Debian and Ubuntu: + + ```none + sudo apt install nginx-plus-module-njs + ``` + + For CentOS, RHEL, and Oracle Linux: + + ```shell + sudo yum install nginx-plus-module-njs + ``` + +- The following directive included in the top-level ("main") configuration context in **/etc/nginx/nginx.conf**, to load the NGINX JavaScript module: + + ```nginx + load_module modules/ngx_http_js_module.so; + ``` + + +## Configuring AD FS + +Create an AD FS application for NGINX Plus: + +1. Open the AD FS Management window. In the navigation column on the left, right‑click on the **Application Groups** folder and select Add Application Group from the drop‑down menu. + + The Add Application Group Wizard window opens. The left navigation column shows the steps you will complete to add an application group. + +2. In the **Welcome** step, type the application group name in the **Name** field. Here we are using ADFSSSO. 
In the **Template** field, select **Server application** under Standalone applications. Click the  Next >  button. + + + + +3. In the **Server application** step: + + 1. Make a note of the value in the **Client Identifier** field. You will add it to the NGINX Plus configuration in [Step 4 of _Configuring NGINX Plus_](#nginx-plus-variables).
      + + 2. In the **Redirect URI** field, type the URI of the NGINX Plus instance including the port number, and ending in **/\_codexch**. Here we’re using https://my-nginx.example.com:443/\_codexch. Click the  Add  button. + + **Notes:** + + - For production, we strongly recommend that you use SSL/TLS (port 443). + - The port number is mandatory even when you're using the default port for HTTP (80) or HTTPS (443). + +3. Click the  Next >  button. + + + + +4. In the Configure Application Credentials step, click the Generate a shared secret checkbox. Make a note of the secret that AD FS generates (perhaps by clicking the Copy to clipboard button and pasting the clipboard content into a file). You will add the secret to the NGINX Plus configuration in [Step 4 of _Configuring NGINX Plus_](#nginx-plus-variables). Click the  Next >  button. + + + +5. In the **Summary** step, verify that the information is correct, make any necessary corrections to previous steps, and click the  Next >  button. + + + +## Configuring NGINX Plus + +Configure NGINX Plus as the OpenID Connect relying party: + +1. Create a clone of the [nginx-openid-connect](https://github.com/nginxinc/nginx-openid-connect) GitHub repository. + + ```shell + git clone https://github.com/nginxinc/nginx-openid-connect + ``` + +2. Copy these files from the clone to **/etc/nginx/conf.d**: + + - **frontend.conf** + - **openid\_connect.js** + - **openid\_connect.server\_conf** + - **openid\_connect\_configuration.conf** + + +3. Get the URLs for the authorization endpoint, token endpoint, and JSON Web Key (JWK) file from the AD FS configuration. Run the following `curl` command in a terminal, piping the output to the indicated `python` command to output the entire configuration in an easily readable format. We've abridged the output to show only the relevant fields. + + ```shell + $ curl https:///oidc/adfs/.well-known/openid-configuration | python -m json.tool + { + ... 
+ "authorization_endpoint": "https:///oidc/adfs/auth", + ... + "jwks_uri": "https:///oidc/adfs/certs", + ... + "token_endpoint": "https:///oidc/adfs/token", + ... + } + ``` + + +4. In your preferred text editor, open **/etc/nginx/conf.d/frontend.conf**. Change the "default" parameter value of each of the following [map](https://nginx.org/en/docs/http/ngx_http_map_module.html#map) directives to the specified value: + + - `map $host $oidc_authz_endpoint` – Value of `authorization_endpoint` from [Step 3](#nginx-plus-urls) (in this guide, `https:///oidc/adfs/auth`) + - `map $host $oidc_token_endpoint` – Value of `token_endpoint` from [Step 3](#nginx-plus-urls) (in this guide, `https:///oidc/adfs/token`) + - `map $host $oidc_client` – Value in the **Client ID** field from [Step 3 of _Configuring AD FS_](#ad-fs-server-application) (in this guide, `3e23f0eb-9329-46ff-9d37-6ad24afdfaeb`) + - `map $host $oidc_client_secret` – Value in the **Client secret** field from [Step 4 of _Configuring AD FS_](#ad-fs-configure-application-credentials) (in this guide, `NUeuULtSCjgXTGSkq3ZwEeCOiig4-rB2XiW_W`) + - `map $host $oidc_hmac_key` – A unique, long, and secure phrase + +5. Configure the JWK file. The procedure depends on which version of NGINX Plus you are using. + + - In NGINX Plus R17 and later, NGINX Plus can read the JWK file directly from the URL reported as `jwks_uri` in [Step 3](#nginx-plus-urls). Change **/etc/nginx/conf.d/frontend.conf** as follows: + + 1. Comment out (or remove) the [auth_jwt_key_file](http://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_file) directive. + 2. Uncomment the [auth_jwt_key_request](http://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_request) directive. (Its parameter, `/_jwks_uri`, refers to the value of the `$oidc_jwt_keyfile` variable, which you set in the next step.) + 3. 
Change the second parameter of the `set $oidc_jwt_keyfile` directive to the value reported in the `jwks_uri` field in [Step 3](#nginx-plus-urls) (in this guide, `https:///oidc/adfs/certs`). + + - In NGINX Plus R16 and earlier, the JWK file must be on the local disk. (You can also use this method with NGINX Plus R17 and later if you wish.) + + 1. Copy the JSON contents from the JWK file named in the `jwks_uri` field in [Step 3](#nginx-plus-urls) (in this guide, `https:///oidc/adfs/certs`) to a local file (for example, `/etc/nginx/my_adfs_jwk.json`). + 2. In **/etc/nginx/conf.d/frontend.conf**, change the second parameter of the `set $oidc_jwt_keyfile` directive to the local file path. + +6. Confirm that the user named by the [user](http://nginx.org/en/docs/ngx_core_module.html#user) directive in the NGINX Plus configuration (in **/etc/nginx/nginx.conf** by convention) has read permission on the JWK file. + + +## Testing + +In a browser, enter the address of your NGINX Plus instance and try to log in using the credentials of a user who has access to the application. + + + + +## Troubleshooting + +See the [**Troubleshooting**](https://github.com/nginxinc/nginx-openid-connect#troubleshooting) section at the nginx-openid-connect repository on GitHub. + +### Revision History + +- Version 2 (March 2020) – Updates to _Configuring NGINX Plus_ section +- Version 1 (December 2019) – Initial version (NGINX Plus Release 20) diff --git a/content/nginx/deployment-guides/single-sign-on/auth0.md b/content/nginx/deployment-guides/single-sign-on/auth0.md new file mode 100644 index 000000000..ba72fb797 --- /dev/null +++ b/content/nginx/deployment-guides/single-sign-on/auth0.md @@ -0,0 +1,188 @@ +--- +description: Learn how to enable single sign-on (SSO) with [Auth0](https://auth0.com/) + for applications proxied by F5 NGINX Plus. +docs: DOCS-884 +doctypes: +- tutorial +tags: +- docs +title: Single Sign-On With Auth0 +toc: true +weight: 100 +--- + +
      + +This documentation applies to F5 NGINX Plus R15 and later. +
      + +You can use NGINX Plus with [Auth0](https://auth0.com/) and OpenID Connect to enable single sign-on (SSO) for your proxied applications. By following the steps in this guide, you will learn how to set up SSO using OpenID Connect as the authentication mechanism, with Auth0 as the identity provider (IdP), and NGINX Plus as the relying party. + +{{< see-also >}}{{< readfile file="includes/nginx-openid-repo-note.txt" markdown="true" >}}{{< /see-also >}} + +## Prerequisites + +To complete the steps in this guide, you need the following: + +- An Auth0 tenant with administrator privileges. +- [NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) with a valid subscription. +- The [NGINX JavaScript module](https://www.nginx.com/products/nginx/modules/nginx-javascript/) (`njs`) -- the `njs` module handles the interaction between NGINX Plus and Auth0. + +## Install NGINX Plus and the njs Module {#install-nginx-plus-njs} + +1. If you do not already have NGINX Plus installed, follow the steps in the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-plus/) to do so. +2. Install the NGINX JavaScript module by following the steps in the [`njs` installation guide](https://nginx.org/en/docs/njs/install.html). +3. Add the following directive to the top-level ("main") configuration context in the NGINX Plus configuration (`/etc/nginx/nginx.conf`) to load the `njs` module: + + ```Nginx configuration file + load_module modules/ngx_http_js_module.so; + ``` + +## Configure Auth0 {#config-auth0} + +Take the steps in this section to create a new application for NGINX Plus. + +{{< note >}} This section contains images that reflect the state of the Auth0 web interface at the time of publication. The actual Auth0 GUI may differ from the examples shown here. 
Use this guide as a reference and adapt the instructions to suit the current Auth0 GUI as necessary.{{< /note >}} + +### Create a new Auth0 Application {#create-auth0-app} + +1. Log in to your Auth0 Dashboard at [manage.auth0.com](https://manage.auth0.com/). +1. Select **Applications > Applications** from the sidebar menu. +1. On the **Applications** page, select the **Create Application** button. +1. In the **Create application** window, provide the information listed below and then select **Create**. + + - **Name**: A name for the application, for example "nginx-plus-app". + - **Application Type**: **Regular Web Applications** + + {{< img src="/img/sso/auth0/sso-auth0-create-app.png" alt="image showing the Create application window in the Auth0 dashboard" >}} + +### Set up the Web Application {#web-app-setup} + +In this section, you'll set up a web application that follows the Auth0 [Authorization Code Flow](https://auth0.com/docs/get-started/authentication-and-authorization-flow/authorization-code-flow). + +1. On the **Application** page in the [Auth0 dashboard](https://manage.auth0.com/), select your web application. +1. Select the **Settings** tab for your application. +1. Make note of the Client ID and Client Secret displayed in the **Basic Information** section. + + {{< img src="/img/sso/auth0/sso-auth0-app.png" alt="image showing the basic information section of the web application settings in the Auth0 dashboard" >}} + +1. In the **Application URIs** section, provide the URI of the NGINX Plus instance in the **Allowed Callback URLs** field. + + - The URL must include the port number and end in **/_codexch**. In our example, we used the URL `http://nginx-plus-app:8010/_codexch`. + - The port is always required, even if you use the default port for HTTP (`80`) or HTTPS (`443`). + - The use of SSL/TLS (`443`) is strongly recommended for production environments. 
+ + {{< img src="/img/sso/auth0/sso-auth0-app-settings.png" alt="image showing the Application URIs settings in the Auth0 dashboard" >}} + +1. In the **Advanced Settings** section, select the **Endpoints** tab. +1. Make note of the **OpenID Configuration** URL. + + {{< img src="/img/sso/auth0/sso-auth0-app-advanced-settings.png" alt="image showing the Advanced Application Settings section of the Auth0 dashboard" >}} + +1. Select **Save Changes**. + +### Set up Authentication {#authn-setup} + +{{< note >}}For the purposes of this guide, we will add a new Auth0 user database and user account to use for testing. + +You can set up authentication using any of the available [Auth0 identity providers](https://auth0.com/docs/authenticate/identity-providers). {{< /note >}} + +To set up a new user database and add a user account to it, take the steps below. + +1. Log in to the [Auth0 dashboard](https://manage.auth0.com/) and select **Authentication > Database** from the sidebar menu. +1. Select the **Create DB Connection** button. +1. Provide a **Name** for the database connection, then select **Create**. +1. On the **Database** page, select the **Applications** tab. Then, select the toggle button next to the [application you created earlier](#create-a-new-auth0-application). + + {{< img src="/img/sso/auth0/sso-auth0-db-app.png" alt="image showing the Applications settings for an OIDC Authentication database in the Auth0 dashboard" >}} + +1. In the sidebar menu, select **User Management > Users**. +1. On the **Users** page, select the **Create User** button. +1. In the **Create user** window, provide the following information, then select **Create**. + - **Email**: user's email + - **Password**: a password for the user account + - **Connection**: select your **database** from the list. 
      + + {{< img src="/img/sso/auth0/sso-auth0-create-user.png" alt="image showing the Create User settings window in the Auth0 dashboard" >}} + +The user should receive an email to the email address provided. Once the user verifies their account by clicking on the link in the email, the account creation process is complete. + +## Set up NGINX Plus {#nginx-plus-setup} + +Take the steps in this section to set up NGINX Plus as the OpenID Connect relying party. + +### Configure NGINX OpenID Connect {#nginx-plus-oidc-config} + +1. Clone the [nginx-openid-connect](https://github.com/nginxinc/nginx-openid-connect) GitHub repository, or download the repo files. + + ```bash + git clone https://github.com/nginxinc/nginx-openid-connect.git + ``` + +1. Run the *configure.sh* script, which will update the NGINX configuration files with the values for your Auth0 application. + + For example: + + ```bash + ./nginx-openid-connect/configure.sh \ + --auth_jwt_key request \ + --client_id Nhotzxx...IERmUi \ + --client_secret 6ZHd0j_r...UtDZ5bkdu \ + https://.us.auth0.com/.well-known/openid-configuration + ``` + +1. In the `frontend.conf` file, update the **my_backend** upstream with the address of the application that you want to add OIDC authorization to. + + For example: + + ```Nginx configuration file + upstream my_backend { + zone my_backend 64k; + server my-backend-app.com:80; + } + ``` + +1. In the *openid_connect.server_conf* file, add the [`proxy_set_header`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) directive to the `/_jwks_uri` and `/_token` locations to `Accept-Encoding "gzip"`, as shown below. + + ```Nginx configuration file + ... + location = /_jwks_uri { + ... + proxy_set_header Accept-Encoding "gzip"; + } + ... + location = /_token { + ... + proxy_set_header Accept-Encoding "gzip"; + } + ... + ``` + +1. 
Copy the following files to the */etc/nginx/conf.d* directory on the host machine where NGINX Plus is installed: + + - `frontend.conf` + - `openid_connect.js` + - `openid_connect.server_conf` + - `openid_connect_configuration.conf` + +1. Reload the NGINX configuration: + + ```bash + sudo nginx -s reload + ``` + +## Test the Setup + +1. In a browser, enter the address of your NGINX Plus instance. You should be directed to the Auth0 login page, as shown in the example below. + + {{< img src="/img/sso/auth0/sso-auth0-login-test.png" alt="image showing an example Auth0 login screen that contains username and password fields" >}} + +1. You should be able to log in using the credentials of the user account that you created in the Auth0 database. + +## Troubleshooting + +Refer to the [Troubleshooting](https://github.com/nginxinc/nginx-openid-connect#troubleshooting) section in the `nginx-openid-connect` repository on GitHub. + +## Revision History + +- Version 1 (May 2022) - Initial version diff --git a/content/nginx/deployment-guides/single-sign-on/cognito.md b/content/nginx/deployment-guides/single-sign-on/cognito.md new file mode 100644 index 000000000..39dd9f9fd --- /dev/null +++ b/content/nginx/deployment-guides/single-sign-on/cognito.md @@ -0,0 +1,189 @@ +--- +description: Enable OpenID Connect-based single sign-on for applications proxied by NGINX + Plus, using Amazon Cognito as the identity provider (IdP). +docs: DOCS-464 +doctypes: +- task +title: Single Sign-On with Amazon Cognito +toc: true +weight: 100 +--- + +This guide explains how to enable single sign‑on (SSO) for applications being proxied by F5 NGINX Plus. The solution uses OpenID Connect as the authentication mechanism, with [Amazon Cognito](https://aws.amazon.com/cognito/) as the identity provider (IdP), and NGINX Plus as the relying party. 
+ +{{< see-also >}}{{< readfile file="includes/nginx-openid-repo-note.txt" markdown="true" >}}{{< /see-also >}} + + + +## Prerequisites + +The instructions assume you have the following: + +- An [AWS account](https://aws.amazon.com/premiumsupport/knowledge-center/create-and-activate-aws-account/). +- An NGINX Plus subscription and NGINX Plus R15 or later. For installation instructions, see the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-plus/). +- The [NGINX JavaScript module](https://www.nginx.com/blog/introduction-nginscript/) (njs), required for handling the interaction between NGINX Plus and the IdP. After installing NGINX Plus, install the module with the command for your operating system. + + For Debian and Ubuntu: + + ```none + sudo apt install nginx-plus-module-njs + ``` + + For CentOS, RHEL, and Oracle Linux: + + ```shell + sudo yum install nginx-plus-module-njs + ``` + +- The following directive included in the top-level ("main") configuration context in **/etc/nginx/nginx.conf**, to load the NGINX JavaScript module: + + ```nginx + load_module modules/ngx_http_js_module.so; + ``` + + +## Configuring Amazon Cognito + +**Note:** The following procedure reflects the Cognito GUI at the time of publication, but the GUI is subject to change. Use this guide as a reference and adapt to the current Cognito GUI as necessary. + +Create a new application for NGINX Plus in the Cognito GUI: + +1. Log in to your AWS account, open the AWS Management Console ([console.aws.amazon.com](https://console.aws.amazon.com)), and navigate to the Cognito dashboard (you can, for example, click **Cognito** in the **Security, Identity, & Compliance** section of the **Services** drop‑down menu). + +2. On the Cognito dashboard, click **Manage User Pools** to open the **Your User Pools** window. Click the  Create a user pool  button or the highlighted phrase. + + + +3. 
In the **Create a user pool** window that opens, type a value in the **Pool name** field (in this guide, it's nginx-plus-pool), then click the Review defaults button. + + + + +4. On the **Review** tab which opens, click Add app client... in the **App clients** field near the bottom. + + + +5. On the **App clients** tab which opens, click Add an app client. + +6. On the **Which app clients will have access to this user pool?** window which opens, enter a value (in this guide, nginx-plus-app) in the App client name field. Make sure the Generate client secret box is checked, then click the  Create app client  button. + + + +7. On the confirmation page which opens, click Return to pool details to return to the **Review** tab. On that tab click the  Create pool  button at the bottom. (The screenshot in [Step 4](#cognito-review-tab) shows the button.) + + +8. On the details page which opens to confirm the new user pool was successfully created, make note of the value in the **Pool Id** field; you will add it to the NGINX Plus configuration in [Step 3 of _Configuring NGINX Plus_](#nginx-plus-variables). + + 'General settings' tab in Amazon Cognito GUI + + +9. Click Users and groups in the left navigation column. In the interface that opens, designate the users (or group of users, on the **Groups** tab) who will be able to use SSO for the app being proxied by NGINX Plus. For instructions, see the Cognito documentation about [creating users](https://docs.aws.amazon.com/cognito/latest/developerguide/how-to-create-user-accounts.html), [importing users](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-using-import-tool.html), or [adding a group](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-user-groups.html). + + 'Users and groups' tab in Amazon Cognito GUI + +10. Click **App clients** in the left navigation bar. 
On the tab that opens, click the Show Details button in the box labeled with the app client name (in this guide, nginx-plus-app). + + 'App clients' tab in Amazon Cognito GUI + + +11. On the details page that opens, make note of the values in the App client id and App client secret fields. You will add them to the NGINX Plus configuration in [Step 3 of _Configuring NGINX Plus_](#nginx-plus-variables). + + + +12. Click App client settings in the left navigation column. In the tab that opens, perform the following steps: + + 1. In the Enabled Identity Providers section, click the Cognito User Pool checkbox (the **Select all** box gets checked automatically). + 2. In the **Callback URL(s)** field of the Sign in and sign out URLs section, type the URI of the NGINX Plus instance including the port number, and ending in **/\_codexch**. Here we’re using https://my-nginx-plus.example.com:443/_codexch. + + **Notes:** + + - For production, we strongly recommend that you use SSL/TLS (port 443). + - The port number is mandatory even when you're using the default port for HTTP (80) or HTTPS (443). + + 3. In the **OAuth 2.0** section, click the Authorization code grant checkbox under Allowed OAuth Flows and the **email**, **openid**, and **profile** checkboxes under Allowed OAuth Scopes. + 4. Click the  Save changes  button. + + + + +13. Click **Domain name** in the left navigation column. In the tab that opens, type a domain prefix in the **Domain prefix** field under Amazon Cognito domain (in this guide, my-nginx-plus). Click the  Save changes  button. + + + + +## Configuring NGINX Plus + +Configure NGINX Plus as the OpenID Connect relying party: + +1. Create a clone of the [nginx-openid-connect](https://github.com/nginxinc/nginx-openid-connect) GitHub repository. + + ```shell + git clone https://github.com/nginxinc/nginx-openid-connect + ``` + +2. 
Copy these files from the clone to **/etc/nginx/conf.d**: + + - **frontend.conf** + - **openid_connect.js** + - **openid_connect.server\_conf** + + +3. In your preferred text editor, open **/etc/nginx/conf.d/frontend.conf**. Change the second parameter of each of the following [set](http://nginx.org/en/docs/http/ngx_http_rewrite_module.html#set) directives to the specified value. + + The `` variable is the full value in the **Domain prefix** field in [Step 13 of _Configuring Amazon Cognito_](#cognito-domain-name). In this guide it is https://my-nginx-plus.auth.us-east-2.amazoncognito.com. + + - `set $oidc_authz_endpoint` – `/oauth2/authorize` + - `set $oidc_token_endpoint` – `/oauth2/token` + - `set $oidc_client` – Value in the App client id field from [Step 11 of _Configuring Amazon Cognito_](#cognito-app-client-id-secret) (in this guide, `2or4cs8bjo1lkbq6143tqp6ist`) + - `set $oidc_client_secret` – Value in the App client secret field from [Step 11 of _Configuring Amazon Cognito_](#cognito-app-client-id-secret) (in this guide, `1k63m3nrcnu...`) + - `set $oidc_hmac_key` – A unique, long, and secure phrase + +4. Configure the JWK file. The file's URL is + + **https://cognito-idp.**_region_**.amazonaws.com/**_User-Pool-ID_**/.well-known/jwks.json** + + where + + - _region_ is the same AWS region name as in the `` variable used in [Step 3](#nginx-plus-variables) + - _User-Pool-ID_ is the value in the **Pool Id** field in [Step 8 of _Configuring Amazon Cognito_](#cognito-pool-id) + + In this guide, the URL is + + https://cognito-idp.us-east-2.amazonaws.com/us-east-2_mLoGHJpOs/.well-known/jwks.json. + + The method for configuring the JWK file depends on which version of NGINX Plus you are using: + + - In NGINX Plus R17 and later, NGINX Plus can read the JWK file directly. Change **/etc/nginx/conf.d/frontend.conf** as follows: + + 1. Comment out (or remove) the [auth_jwt_key_file](http://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_file) directive. 
+ 2. Uncomment the [auth_jwt_key_request](http://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_request) directive. (Its parameter, `/_jwks_uri`, refers to the value of the `$oidc_jwt_keyfile` variable, which you set in the next step.) + 3. Change the second parameter of the `set $oidc_jwt_keyfile` directive to the URL of the JWK file (`https://cognito-idp.../.well-known/jwks.json`). + + - In NGINX Plus R16 and earlier, the JWK file must be on the local disk. (You can also use this method with NGINX Plus R17 and later if you wish.) + + 1. Copy the JSON contents from the JWK file (****) to a local file (for example, **/etc/nginx/my\_cognito\_jwk.json**). + 2. In **/etc/nginx/conf.d/frontend.conf**, change the second parameter of the `set $oidc_jwt_keyfile` directive to the local file path. + +5. At the time of publication, Cognito does not support the OpenID **offline_access** scope. Open **/etc/nginx/conf.d/openid\_connect.server\_conf** in a text editor and remove `+offline_access` from the list of scopes on line 10, so that it looks like this: + + ```nginx + return 302 "$oidc_authz_endpoint?response_type=code&scope=openid+profile+email&client_id=$oidc_clientaws...; + ``` + +6. Confirm that the user named by the [user](http://nginx.org/en/docs/ngx_core_module.html#user) directive in the NGINX Plus configuration (in **/etc/nginx/nginx.conf** by convention) has read permission on the JWK file. + + +## Testing + +In a browser, enter the address of your NGINX Plus instance and try to log in using the credentials of a user assigned to the application (see [Step 9 in _Configuring Amazon Cognito_](#cognito-users)). The NGINX logo that appears in the screenshot was added on Cognito's **UI customization** tab (not shown in this guide). + + + + +## Troubleshooting + +See the [**Troubleshooting**](https://github.com/nginxinc/nginx-openid-connect#troubleshooting) section at the nginx-openid-connect repository on GitHub. 
+ +### Revision History + +- Version 1 (March 2020) – Initial version (NGINX Plus Release 20) diff --git a/content/nginx/deployment-guides/single-sign-on/keycloak.md b/content/nginx/deployment-guides/single-sign-on/keycloak.md new file mode 100644 index 000000000..7444a9696 --- /dev/null +++ b/content/nginx/deployment-guides/single-sign-on/keycloak.md @@ -0,0 +1,168 @@ +--- +description: Enable OpenID Connect-based single sign-on for applications proxied by NGINX + Plus, using Keycloak as the identity provider (IdP). +docs: DOCS-465 +doctypes: +- task +title: Single Sign-On with Keycloak +toc: true +weight: 100 +--- + +This guide explains how to enable single sign-on (SSO) for applications being proxied by F5 NGINX Plus. The solution uses OpenID Connect as the authentication mechanism, with [Keycloak](https://www.keycloak.org/) as the identity provider (IdP), and NGINX Plus as the relying party. + +{{< see-also >}}{{< readfile file="includes/nginx-openid-repo-note.txt" markdown="true" >}}{{< /see-also >}} + + + +## Prerequisites + +The instructions assume you have the following: + +- A running Keycloak server. See the Keycloak documentation for [Getting Started](https://www.keycloak.org/guides#getting-started) and [Server](https://www.keycloak.org/guides#server) configuration instructions. +- An NGINX Plus subscription and NGINX Plus R15 or later. For installation instructions, see the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-plus/). +- The [NGINX JavaScript module](https://www.nginx.com/blog/introduction-nginscript/) (njs), required for handling the interaction between NGINX Plus and the IdP. After installing NGINX Plus, install the module with the command for your operating system. 
+ + For Debian and Ubuntu: + + ```none + sudo apt install nginx-plus-module-njs + ``` + + For CentOS, RHEL, and Oracle Linux: + + ```shell + sudo yum install nginx-plus-module-njs + ``` + +- The following directive included in the top-level ("main") configuration context in **/etc/nginx/nginx.conf**, to load the NGINX JavaScript module: + + ```nginx + load_module modules/ngx_http_js_module.so; + ``` + + +## Configuring Keycloak + +**Note:** The following procedure reflects the Keycloak GUI at the time of publication, but the GUI is subject to change. Use this guide as a reference and adapt to the current Keycloak GUI as necessary. + +Create a Keycloak client for NGINX Plus in the Keycloak GUI: + +1. Access the Keycloak Admin Console at **http://_keycloak-server-address_:8080/auth/admin/** and log in. + +2. In the left navigation column, click **Clients**. On the **Clients** page that opens, click the **Create** button in the upper right corner. + + +3. On the **Add Client** page that opens, enter or select these values, then click the  Save  button. + + - **Client ID** – The name of the application for which you're enabling SSO (Keycloak refers to it as the “client”). Here we're using NGINX-Plus. + - **Client Protocol** – openid-connect. + + + +4. On the **NGINX Plus** page that opens, enter or select these values on the Settings tab: + + - **Access Type** – confidential + - **Valid Redirect URIs** – The URI of the NGINX Plus instance, including the port number, and ending in **/\_codexch** (in this guide it is https://my-nginx.example.com:443/_codexch) + + **Notes:** + + - For production, we strongly recommend that you use SSL/TLS (port 443). + - The port number is mandatory even when you're using the default port for HTTP (80) or HTTPS (443). + + + + +5. Click the Credentials tab and make a note of the value in the **Secret** field. You will copy it into the NGINX Plus configuration file in [Step 4 of _Configuring NGINX Plus_](#nginx-plus-variables). + + + +6. 
Click the Roles tab, then click the **Add Role** button in the upper right corner of the page that opens. + +7. On the **Add Role** page that opens, type a value in the **Role Name** field (here it is nginx-keycloak-role) and click the  Save  button. + + + +8. In the left navigation column, click **Users**. On the **Users** page that opens, either click the name of an existing user, or click the **Add user** button in the upper right corner to create a new user. For complete instructions, see the [Keycloak documentation](https://www.keycloak.org/docs/latest/server_admin/index.html#user-management). + + +9. On the management page for the user (here, user01), click the Role Mappings tab. On the page that opens, select NGINX-Plus on the **Client Roles** drop‑down menu. Click nginx-keycloak-role in the **Available Roles** box, then click the **Add selected** button below the box. The role then appears in the **Assigned Roles** and **Effective Roles** boxes, as shown in the screenshot. + + + + + +## Configuring NGINX Plus + +Configure NGINX Plus as the OpenID Connect relying party: + +1. Create a clone of the [nginx-openid-connect](https://github.com/nginxinc/nginx-openid-connect) GitHub repository. + + ```shell + git clone https://github.com/nginxinc/nginx-openid-connect + ``` + +2. Copy these files from the clone to **/etc/nginx/conf.d**: + + - **frontend.conf** + - **openid\_connect.js** + - **openid\_connect.server\_conf** + - **openid\_connect\_configuration.conf** + + +3. Get the URLs for the authorization endpoint, token endpoint, and JSON Web Key (JWK) file from the Keycloak configuration. Run the following `curl` command in a terminal, piping the output to the indicated `python` command to output the entire configuration in an easily readable format. We've abridged the output to show only the relevant fields. + + ```shell + $ curl https:///auth/realms/master/.well-known/openid-configuration | python -m json.tool + ... 
+ { + "authorization_endpoint": "https:///auth/realms/master/protocol/openid-connect/auth", + ... + "jwks_uri": "https:///auth/realms/master/protocol/openid-connect/certs", + ... + "token_endpoint": "https:///auth/realms/master/protocol/openid-connect/token", + ... + } + ``` + + +4. Using your preferred text editor, open **/etc/nginx/conf.d/openid_connect_configuration.conf**. Change the "default" parameter value of each of the following [map](https://nginx.org/en/docs/http/ngx_http_map_module.html#map) directives to the specified value: + + - `map $host $oidc_authz_endpoint` – Value of `authorization_endpoint` from [Step 3](#nginx-plus-urls) (in this guide, `https:///auth/realms/master/protocol/openid-connect/auth`) + - `map $host $oidc_token_endpoint` – Value of `token_endpoint` from [Step 3](#nginx-plus-urls) (in this guide, `https:///auth/realms/master/protocol/openid-connect/token`) + - `map $host $oidc_client` – Value in the **Client ID** field from [Step 3 of _Configuring Keycloak_](#keycloak-client-id) (in this guide, `NGINX Plus`) + - `map $host $oidc_client_secret` – Value in the **Secret** field from [Step 5 of _Configuring Keycloak_](#keycloak-secret) (in this guide, `w4b32c4b0-f4b3-41d9-a345-2bc0fbfcaaba`) + - `map $host $oidc_hmac_key` – A unique, long, and secure phrase + +5. Configure the JWK file. The procedure depends on which version of NGINX Plus you are using. + + - In NGINX Plus R17 and later, NGINX Plus can read the JWK file directly from the URL reported as `jwks_uri` in [Step 3](#nginx-plus-urls). Change **/etc/nginx/conf.d/frontend.conf** as follows: + + 1. Comment out (or remove) the [auth_jwt_key_file](http://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_file) directive. + + 2. Uncomment the [auth_jwt_key_request](http://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_request) directive. 
(Its parameter, `/_jwks_uri`, refers to the value of the `$oidc_jwt_keyfile` variable, which you set in the next step.) + 3. Change the "default" parameter of the `map $host $oidc_jwt_keyfile` directive to the value reported in the `jwks_uri` field in [Step 3](#nginx-plus-urls) (in this guide, `https:///auth/realms/master/protocol/openid-connect/certs`). + + - In NGINX Plus R16 and earlier, the JWK file must be on the local disk. (You can also use this method with NGINX Plus R17 and later if you wish.) + + 1. Copy the JSON contents from the JWK file named in the `jwks_uri` field in [Step 3](#nginx-plus-urls) (in this guide, `https:///auth/realms/master/protocol/openid-connect/certs`) to a local file (for example, `/etc/nginx/my_keycloak_jwk.json`). + 2. In **/etc/nginx/conf.d/openid_connect_configuration.conf**, change the "default" parameter of the `map $host $oidc_jwt_keyfile` directive to the local file path. + +6. Confirm that the user named by the [user](http://nginx.org/en/docs/ngx_core_module.html#user) directive in the NGINX Plus configuration (in **/etc/nginx/nginx.conf** by convention) has read permission on the JWK file. + + +## Testing + +In a browser, enter the address of your NGINX Plus instance and try to log in using the credentials of a user mapped to the role for NGINX Plus (see [Step 9 of _Configuring Keycloak_](#keycloak-users)). + + + + +## Troubleshooting + +See the [**Troubleshooting**](https://github.com/nginxinc/nginx-openid-connect#troubleshooting) section at the nginx-openid-connect repository on GitHub. 
+ +### Revision History + +- Version 2 (March 2020) – Updates to _Configuring NGINX Plus_ section +- Version 1 (November 2019) – Initial version (NGINX Plus Release 19) diff --git a/content/nginx/deployment-guides/single-sign-on/okta.md b/content/nginx/deployment-guides/single-sign-on/okta.md new file mode 100644 index 000000000..c839e87b6 --- /dev/null +++ b/content/nginx/deployment-guides/single-sign-on/okta.md @@ -0,0 +1,189 @@ +--- +description: Learn how to enable single sign-on (SSO) with Okta for applications proxied + by F5 NGINX Plus. +docs: DOCS-466 +doctypes: +- task +title: Single Sign-On with Okta +toc: true +weight: 100 +--- + +
      + +This documentation applies to F5 NGINX Plus R15 and later. +
      + +You can use NGINX Plus with Okta and OpenID Connect to enable single sign-on (SSO) for your proxied applications. By following the steps in this guide, you will learn how to set up SSO using OpenID Connect as the authentication mechanism, with Okta as the identity provider (IdP), and NGINX Plus as the relying party. + +{{< see-also >}}{{< readfile file="includes/nginx-openid-repo-note.txt" markdown="true" >}}{{< /see-also >}} + +## Prerequisites + +To complete the steps in this guide, you need the following: + +- An Okta administrator account. +- [NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) with a valid subscription. +- The [NGINX JavaScript module](https://www.nginx.com/products/nginx/modules/nginx-javascript/) (`njs`) -- the `njs` module handles the interaction between NGINX Plus and Okta. +- Install `jq` on the host machine where you installed NGINX Plus. + +## Install NGINX Plus and the njs Module + +1. If you do not already have NGINX Plus installed, follow the steps in the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-plus/) to do so. +2. Install the NGINX JavaScript module by following the steps in the [`njs` installation guide](https://nginx.org/en/docs/njs/install.html). +3. Add the following directive to the top-level ("main") configuration context in the NGINX Plus configuration (`/etc/nginx/nginx.conf`) to load the `njs` module: + + ```Nginx configuration file + load_module modules/ngx_http_js_module.so; + ``` + +## Configure Okta {#okta} + +Take the steps in this section to create a new application for NGINX Plus. + +{{< note >}} This section contains images that reflect the state of the Okta web interface at the time of publication. The actual Okta GUI may differ from the examples shown here. Use this guide as a reference and adapt the instructions to suit the current Okta GUI as necessary.{{< /note >}} + +This section describes the Okta Workforce Identity SSO product. 
You will need administrator access to your organization in Okta to complete this task. Your experience may differ somewhat if you're using the Okta Customer Identity product. + +### Create a New Okta Web Application + + + +1. Log in to Okta at [okta.com](https:///www.okta.com). +1. Select the **Admin** button next to your username to access the Admin console. +1. On your Admin dashboard, select **Applications** in the left-hand navigation menu. +1. On the **Applications** page, select the **Create App Integration** button. +1. In the **Create a new app integration** window, define the following values, then select **Next**: + + - **Sign-in method**: OIDC - OpenID Connect + - **Application type**: Web Application
      + + {{< img src="/img/sso/okta/Okta-Create-New-Application-Integration.png" alt="image showing the Create a new app integration window in the Okta UI, with OIDC and Web Application options selected" width="65%" >}} + +### Set up the Web App Integration {#okta-integration} + +On the **New Web App Integration** page in the Okta web interface, fill in the following information, then select **Save**. + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Desciption | Example Value | +|-------------|---------|----------| +| **App integration name** | The name of the OpenID Connect relying party. Okta refers to this as the "application". | **NGINX-Plus** | +| **Sign-in redirect URIs** | The URI of the NGINX Plus instance -- including the port number -- ending in **`/_codexch`**.
      • The port is always required, even if you use the default port for HTTP (`80`) or HTTPS (`443`).
      • The use of SSL/TLS (`443`) is strongly recommended for production environments.
      | `https://my-nginx.example.com:443/_codexch` | +| **Sign-out redirect URIs** | The URI to redirect users to after logging out.
      This is an optional field with a default value of `http://localhost:8080`. | We removed the default value in our example. | +| **Controlled access** | Controls who can access the application. | "Allow everyone in your organization to access"
      **You should select the appropriate value for your use case.**| + +{{< /bootstrap-table >}} + +{{< img alt="Okta Create OpenID Connect Integration" src="/img/sso/okta/Okta-Create-OpenID-Connect-Integration.png" >}} + +### Get the Okta App Client Credentials {#okta-client-id-secret} + +After you finish creating your application, the Okta Application page should display. You can find the Client Credentials for your Okta Application here. + +{{< img src="/img/sso/okta/Okta-Client-Credentials.png" alt="Image showing the application landing page in Okta, which contains the Client Credentials for the application." width="65%" >}} + +{{< tip >}}If you need to find this information later, log in to your Okta admin account as [described above](#okta-login), select **Applications** in the left-hand menu, then select your application.{{< /tip >}} + +Make note of the **Client ID** and **Client secret** values for your application. You will need these when you [configure NGINX Plus](#nginx-plus). + +### Manage Access to your Okta Application {#okta-assign-applications} + +To change the users and groups that have access to your Okta Application: + +1. Log in to Okta as an Admin as [described above](#okta-login). +1. Select **Applications** in the left-hand menu, then select your application. +1. Select the **Assignments** tab for the Application. + +Here, you can manage which users in your organization are granted access to this application. + +## Set up NGINX Plus {#nginx-plus} + +Take the steps in this section to set up NGINX Plus as the OpenID Connect relying party. + +### Configure NGINX OpenID Connect {#nginx-plus-oidc-config} + +1. Clone the [nginx-openid-connect](https://github.com/nginxinc/nginx-openid-connect) GitHub repository, or download the repo files. + + ```shell + git clone https://github.com/nginxinc/nginx-openid-connect.git + ``` + +1. 
Copy the following files to the `/etc/nginx/conf.d` directory on the host machine where NGINX Plus is installed: + + - `frontend.conf` + - `openid_connect.js` + - `openid_connect.server_conf` + - `openid_connect_configuration.conf` + +1. Get the URLs for the authorization endpoint, token endpoint, and JSON Web Key (JWK) file from the Okta configuration. + + Run the following `curl` command in a terminal. + {{< tip>}}We recommend piping the output to `jq` to output the entire configuration in an easily readable format.{{< /tip >}} + The output in the example below is abridged to show only the relevant fields. + + ```shell + curl https://-admin.okta.com/.well-known/openid-configuration | jq + ... + { + "authorization_endpoint": "https://.okta.com/oauth2/v1/authorize", + ... + "jwks_uri": "https://.okta.com/oauth2/v1/keys", + ... + "token_endpoint": "https://.okta.com/oauth2/v1/token", + ... + } + ``` + + + +1. Add the correct values for your IdP to the OpenID Connect configuration file (`/etc/nginx/conf.d/openid_connect_configuration.conf`). + + This file contains the primary configuration for one or more IdPs in `map{}` blocks. You should modify the `map…$oidc_` blocks as appropriate to match your IdP configuration. + + - Define the `$oidc_authz_endpoint`, `$oidc_token_endpoint`, and `$oidc_jwt_keyfile` values using the information returned in the previous step. + - Change the URI defined in `map…$oidc_logout_redirect` to the URI of a resource (for example, your home page) that should be displayed after a client requests the `/logout` location. + - Set a unique, long, and secure phrase for `$oidc_hmac_key` to ensure nonce values are unpredictable. + +### Set up JSON Web Key Authorization {#nginx-plus-jwk-config} + +NGINX Plus can read the JWK file directly from the URL reported as `jwks_uri` in the output of the `curl` command you ran in the [previous section](#nginx-plus-oidc-config). 
+ +{{< note >}} +If you are using NGINX Plus R16 or earlier, refer to [Set up JWK Authorization using a local file](#nginx-plus-jwk-auth-local). +{{< /note >}} + +Take the following steps to set up NGINX Plus to access the JWK file by using a URI. + +1. In the `/etc/nginx/conf.d/frontend.conf` file, remove (or comment out) the [auth_jwt_key_file](http://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_file) directive. +1. Uncomment the [auth_jwt_key_request](http://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_request) directive. + + The parameter `/_jwks_uri` refers to the value of the `$oidc_jwt_keyfile` variable, which you already set in the OpenID Connect configuration file (`/etc/nginx/conf.d/openid_connect_configuration.conf`). + +#### Set up JWK Authorization using a Local File {#nginx-plus-jwk-auth-local} + +In NGINX Plus R16 and earlier, NGINX Plus cannot access the JWK file via the URI. Instead, the JWK file must be on the local disk. + +Take the steps below to set up JWK authorization using a local file: + +1. Copy the JSON contents from the JWK file named in the `jwks_uri` field to a local file. For example, `/etc/nginx/my_okta_jwk.json` +1. In `/etc/nginx/conf.d/frontend.conf`, change the second parameter of the `set $oidc_jwt_keyfile` directive to the local file path of the JWK file. +1. Confirm that the user named by the [user](http://nginx.org/en/docs/ngx_core_module.html#user) directive in the NGINX Plus configuration -- usually found in `/etc/nginx/nginx.conf` -- has read permission on the JWK file. + +## Test Your Setup + +1. In a browser, enter the address of your NGINX Plus instance. You should be directed to the okta login page, as shown in the example below. + {{< img src="img/sso/okta/Okta-login-window.png" >}} +1. Try to log in using the credentials of a user who is part of your organization. 
{{< note >}}If you restricted access to a group of users, be sure to select a user who has access to the application.{{< /note >}}
      + +This documentation applies to F5 NGINX Plus R15 and later. +
      + +You can use NGINX Plus with [OneLogin](https://www.onelogin.com/) and the OpenID Connect protocol to enable single sign-on (SSO) for your proxied applications. By following the steps in this guide, you will learn how to set up SSO using OpenID Connect as the authentication mechanism, with OneLogin as the identity provider (IdP) and NGINX Plus as the relying party. + +{{< see-also >}}{{< readfile file="includes/nginx-openid-repo-note.txt" markdown="true" >}}{{< /see-also >}} + +## Prerequisites + +To complete the steps in this guide, you need the following: + +- A OneLogin tenant with administrator privileges. +- [NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) with a valid subscription. +- The [NGINX JavaScript module](https://www.nginx.com/products/nginx/modules/nginx-javascript/) (`njs`) -- the `njs` module handles the interaction between NGINX Plus and OneLogin identity provider (IdP). + +## Install NGINX Plus and the njs Module {#install-nginx-plus-njs} + +1. If you do not already have NGINX Plus installed, follow the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-plus/) steps to do so. +2. Install the NGINX JavaScript module by following the steps in the [`njs` installation guide](https://nginx.org/en/docs/njs/install.html). +3. Add the following directive to the top-level ("main") configuration context in the NGINX Plus configuration (`/etc/nginx/nginx.conf`) to load the `njs` module: + + ```Nginx configuration file + load_module modules/ngx_http_js_module.so; + ``` + +## Configure OneLogin {#config-onelogin} + +**Note:** The following procedure reflects the OneLogin GUI at the time of publication, but the GUI is subject to change. Use this guide as a reference and adapt to the current OneLogin GUI as necessary. + +Create a new application for NGINX Plus in the OneLogin GUI: + +1. 
Log in to your OneLogin account at **https://**_domain_**.onelogin.com**, where _domain_ is the domain you chose when you created your account. + +2. Click  Applications  in the title bar and then click the  Add App  button in the upper right corner of the window that opens. + + + +3. On the **Find Applications** page that opens, type OpenID Connect in the search box. Click on the **OpenID Connect (OIDC)** row that appears. + + + +4. On the **Add OpenId Connect (OIDC)** page that opens, change the value in the **Display Name** field to NGINX Plus and click the  Save  button. + + + +5. When the save completes, a new set of choices appears in the left navigation bar. Click **Configuration**. In the **Redirect URI's** field, type the URI of the NGINX Plus instance including the port number, and ending in **/\_codexch** (in this guide it is https://my-nginx.example.com:443/_codexch). Then click the  Save  button. + + **Notes:** + + - For production, we strongly recommend that you use SSL/TLS (port 443). + - The port number is mandatory even when you're using the default port for HTTP (80) or HTTPS (443). + + + + +6. When the save completes, click **SSO** in the left navigation bar. Click Show client secret below the **Client Secret** field. Record the values in the **Client ID** and **Client Secret** fields. You will add them to the NGINX Plus configuration in [Step 4 of _Configuring NGINX Plus_](#nginx-plus-variables). + + + + +7. Assign users to the application (in this guide, NGINX Plus) to enable them to access it for SSO. OneLogin recommends using [roles](https://onelogin.service-now.com/kb_view_customer.do?sysparm_article=KB0010606) for this purpose. You can access the **Roles** page under  Users  in the title bar. + + + + +## Set up NGINX Plus + +Take the steps in this section to set up NGINX Plus as the OpenID Connect Client. + +### Configure NGINX OpenID Connect {#nginx-plus-oidc-config} + +1. 
Clone the [nginx-openid-connect](https://github.com/nginxinc/nginx-openid-connect) GitHub repository, or download the repository files. + + ```shell + git clone https://github.com/nginxinc/nginx-openid-connect.git + ``` + +1. Run the _configure.sh_ script to update the NGINX configuration files with the values for your OneLogin application. + + For example: + + ```bash + ./nginx-openid-connect/configure.sh \ + --auth_jwt_key request \ + --client_id 168d5600-9224-... \ + --client_secret c9210a67d09e85... \ + https://.onelogin.com/oidc/2/.well-known/openid-configuration + ``` + +2. In the `frontend.conf` file, update the **my_backend** upstream with the address of the application that you want to add OIDC authorization to. + + For example: + + ```Nginx configuration file + upstream my_backend { + zone my_backend 64k; + server my-backend-app.com:80; + } + ``` + +3. In the _openid_connect.server_conf_ file, add the [`proxy_set_header`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header) directive to the `/_jwks_uri` and `/_token` locations to `Accept-Encoding "gzip"`, as shown below. + + ```Nginx configuration file + ... + location = /_jwks_uri { + ... + proxy_set_header Accept-Encoding "gzip" + } + ... + location = /_token { + ... + proxy_set_header Accept-Encoding "gzip" + } + ... + ``` + +4. Copy the following files to the _/etc/nginx/conf.d_ directory on the host machine where NGINX Plus is installed: + + - `frontend.conf` + - `openid_connect.js` + - `openid_connect.server_conf` + - `openid_connect_configuration.conf` + +5. Reload the NGINX configuration: + + ```bash + sudo nginx -s reload + ``` + +## Test Your Setup + +In a browser, enter the address of your NGINX Plus instance and try to log in using the credentials of a user assigned to the application (see [Step 7 of _Configuring OneLogin_](#onelogin-roles)). 
+ + + + +## Troubleshooting + +Refer to the [Troubleshooting](https://github.com/nginxinc/nginx-openid-connect#troubleshooting) section in the `nginx-openid-connect` repository on GitHub. + +### Revision History + +- Version 3 (May 2022) - Updates OneLogin's OpenId Connect API endpoints from version 1 to version 2 +- Version 2 (March 2020) – Updates to _Configuring NGINX Plus_ section +- Version 1 (July 2019) – Initial version (NGINX Plus Release 18) diff --git a/content/nginx/deployment-guides/single-sign-on/ping-identity.md b/content/nginx/deployment-guides/single-sign-on/ping-identity.md new file mode 100644 index 000000000..b1d0cacb0 --- /dev/null +++ b/content/nginx/deployment-guides/single-sign-on/ping-identity.md @@ -0,0 +1,193 @@ +--- +description: Enable OpenID Connect-based single-sign for applications proxied by NGINX + Plus, using Ping Identity as the identity provider (IdP). +docs: DOCS-468 +doctypes: +- task +title: Single Sign-On with Ping Identity +toc: true +weight: 100 +--- + +This guide explains how to enable single sign-on (SSO) for applications being proxied by F5 NGINX Plus. The solution uses OpenID Connect as the authentication mechanism, with Ping Identity as the identity provider (IdP) and NGINX Plus as the relying party. + +The instructions in this document apply to both Ping Identity's on‑premises and cloud products, PingFederate and PingOne for Enterprise. + +{{< see-also >}}{{< readfile file="includes/nginx-openid-repo-note.txt" markdown="true" >}}{{< /see-also >}} + + +## Prerequisites + +The instructions assume you have the following: + +- A running deployment of PingFederate or PingOne for Enterprise, and a Ping Identity account. For installation and configuration instructions, see the documentation for [PingFederate](https://docs.pingidentity.com/bundle/pingfederate-93/page/tau1564002955783.html) or [PingOne for Enterprise](https://docs.pingidentity.com/bundle/pingone/page/fjn1564020491958-1.html). 
+- An NGINX Plus subscription and NGINX Plus R15 or later. For installation instructions, see the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-plus/). +- The NGINX JavaScript module (njs), required for handling the interaction between NGINX Plus and the IdP. After installing NGINX Plus, install the module with the command for your operating system. + + For Debian and Ubuntu: + + ```none + sudo apt install nginx-plus-module-njs + ``` + + For CentOS, RHEL, and Oracle Linux: + + ```shell + sudo yum install nginx-plus-module-njs + ``` + +- The following directive included in the top-level ("main") configuration context in **/etc/nginx/nginx.conf**, to load the NGINX JavaScript module: + + ```nginx + load_module modules/ngx_http_js_module.so; + ``` + + +## Configuring PingFederate or PingOne for Enterprise + +**Note:** This guide uses the GUI provided with PingOne for Enterprise. It reflects the GUI at the time of initial publication, but the GUI is subject to change. The PingFederate user interace might also differ. Use this guide as a reference and adapt as necessary for the UI you are using. + +Create a new application for NGINX Plus: + +1. Log in to your Ping Identity account. The administrative dashboard opens automatically. In this guide, we show the PingOne for Enterprise dashboard, and for brevity refer simply to ”PingOne”. + +2. Click  APPLICATIONS  in the title bar, and on the **My Applications** page that opens, click **OIDC** and then the + Add Application button. + + + +3. The Add OIDC Application window pops up. Click the ADVANCED CONFIGURATION box, and then the  Next  button. + + + +4. In section 1 (PROVIDE DETAILS ABOUT YOUR APPLICATION), type a name in the **APPLICATION NAME** field and a short description in the **SHORT DESCRIPTION** field. Here, we're using nginx-plus-application and NGINX Plus. Choose a value from the **CATEGORY** drop‑down menu; here we’re using Information Technology. 
You can also add an icon if you wish. Click the  Next  button. + + + +5. In section 2 (AUTHORIZATION SETTINGS), perform these steps: + + 1. Under **GRANTS**, click both Authorization Code and Implicit.
      + 2. Under **CREDENTIALS**, click the + Add Secret button. PingOne creates a client secret and opens the **CLIENT SECRETS** field to display it, as shown in the screenshot. To see the actual value of the secret, click the eye icon.
      + 3. Click the  Next  button. + + + +6. In section 3 (SSO FLOW AND AUTHENTICATION SETTINGS): + + 1. In the START SSO URL field, type the URL where users access your application. Here we’re using https://example.com. + 2. In the **REDIRECT URIS** field, type the URI of the NGINX Plus instance including the port number, and ending in **/\_codexch**. Here we’re using https://my-nginx-plus.example.com:443/\_codexch (the full value is not visible in the screenshot). + + **Notes:** + + - For production, we strongly recommend that you use SSL/TLS (port 443). + - The port number is mandatory even when you're using the default port for HTTP (80) or HTTPS (443). + + + +7. In section 4 (DEFAULT USER PROFILE ATTRIBUTE CONTRACT), optionally add attributes to the required sub and idpid attributes, by clicking the + Add Attribute button. We’re not adding any in this example. When finished, click the  Next  button. + + + +8. In section 5 (CONNECT SCOPES), click the circled plus-sign on the OpenID Profile (profile) and OpenID Profile Email (email) scopes in the LIST OF SCOPES column. They are moved to the **CONNECTED SCOPES** column, as shown in the screenshot. Click the  Next  button. + + + +9. In section 6 (ATTRIBUTE MAPPING), map attributes from your identity repository to the claims available to the application. The one attribute you must map is **sub**, and here we have selected the value Email from the drop‑down menu (the screenshot is abridged for brevity). + + + + +10. In section 7 (GROUP ACCESS), select the groups that will have access to the application, by clicking the circled plus-sign on the corresponding boxes in the **AVAILABLE GROUPS** column. The boxes move to the **ADDED GROUPS** column. As shown in the screenshot we have selected the two default groups, Domain Administrators@directory and Users@directory. + + Click the  Done  button. + + + +11. You are returned to the **My Applications** window, which now includes a row for nginx-plus-application. 
Click the toggle switch at the right end of the row to the “on” position, as shown in the screenshot. Then click the “expand” icon at the end of the row, to display the application’s details. + + + + +12. On the page that opens, make note of the values in the following fields on the **Details** tab. You will add them to the NGINX Plus configuration in [Step 4 of _Configuring NGINX Plus_](#nginx-plus-variables). + + - **CLIENT ID** (in the screenshot, 28823604-83c5-4608-88da-c73fff9c607a) + - **CLIENT SECRETS** (in the screenshot, 7GMKILBofxb...); click on the eye icon to view the actual value + + + + +## Configuring NGINX Plus + +Configure NGINX Plus as the OpenID Connect relying party: + +1. Create a clone of the [nginx-openid-connect](https://github.com/nginxinc/nginx-openid-connect) GitHub repository. + + ```shell + git clone https://github.com/nginxinc/nginx-openid-connect + ``` + +2. Copy these files from the clone to **/etc/nginx/conf.d**: + + - **frontend.conf** + - **openid\_connect.js** + - **openid\_connect.server\_conf** + + +3. Get the URLs for the authorization endpoint, token endpoint, and JSON Web Key (JWK) file from the Ping Identity configuration. Run the following `curl` command in a terminal, piping the output to the indicated `python` command to output the entire configuration in an easily readable format. We've abridged the output to show only the relevant fields. + + The `` variable is the value in the **CLIENT ID** field that you noted in [Step 12 of _Configuring PingFederate or PingOne for Enterprise_](#ping-client-id-secrets). + + **Note:** This `curl` command is appropriate for Ping One for Enterprise. For PingFederate, you might need to replace `sso.connect.pingidentity.com` with the IP address of your local PingFederate server. + + ```shell + $ curl sso.connect.pingidentity.com//.well-known/openid-configuration | python -m json.tool + ... 
+ { + "authorization_endpoint": "https://sso.connect.pingidentity.com/sso/as/authorization.oauth2", + ... + "jwks_uri": "https://sso.connect.pingidentity.com/sso/as/jwks", + ... + "token_endpoint": "https://sso.connect.pingidentity.com/sso/as/token.oauth2", + ... + } + ``` + + +4. In your preferred text editor, open **/etc/nginx/conf.d/frontend.conf**. Change the second parameter of each of the following [set](http://nginx.org/en/docs/http/ngx_http_rewrite_module.html#set) directives to the specified value: + + - `set $oidc_authz_endpoint` – Value of `authorization_endpoint` from [Step 3](#nginx-plus-urls) (in this guide, `https://sso.connect.pingidentity.com/sso/as/authorization.oauth2`) + - `set $oidc_token_endpoint` – Value of `token_endpoint` from [Step 3](#nginx-plus-urls) (in this guide, `https://sso.connect.pingidentity.com/sso/as/token.oauth2`) + - `set $oidc_client` – Value in the **CLIENT ID** field in [Step 12 of _Configuring PingFederate or PingOne for Enterprise_](#ping-client-id-secrets) (in this guide, `28823604-83c5-4608-88da-c73fff9c607a`) + - `set $oidc_client_secret` – Value in the **CLIENT SECRETS** field in [Step 12 of _Configuring PingFederate or PingOne for Enterprise_](#ping-client-id-secrets) (in this guide, `7GMKILBofxb...`) + - `set $oidc_hmac_key` – A unique, long, and secure phrase + +5. Configure the JWK file. The procedure depends on which version of NGINX Plus you are using. + + - In NGINX Plus R17 and later, NGINX Plus can read the JWK file directly from the URL reported as `jwks_uri` in [Step 3](#nginx-plus-urls). Change **/etc/nginx/conf.d/frontend.conf** as follows: + + 1. Comment out (or remove) the [auth_jwt_key_file](http://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_file) directive. + 2. Uncomment the [auth_jwt_key_request](http://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_request) directive. 
(Its parameter, `/_jwks_uri`, refers to the value of the `$oidc_jwt_keyfile` variable, which you set in the next step.) + 3. Change the second parameter of the `set $oidc_jwt_keyfile` directive to the value reported in the `jwks_uri` field in [Step 3](#nginx-plus-urls) (in this guide, `https://sso.connect.pingidentity.com/sso/as/jwks`). + + - In NGINX Plus R16 and earlier, the JWK file must be on the local disk. (You can also use this method with NGINX Plus R17 and later if you wish.) + + 1. Copy the JSON contents from the JWK file named in the `jwks_uri` field in [Step 3](#nginx-plus-urls) (in this guide, `https://sso.connect.pingidentity.com/sso/as/jwks`) to a local file (for example, `/etc/nginx/my_ping_identity_jwk.json`). + 2. In **/etc/nginx/conf.d/frontend.conf**, change the second parameter of the `set $oidc_jwt_keyfile` directive to the local file path. + +6. Confirm that the user named by the [user](http://nginx.org/en/docs/ngx_core_module.html#user) directive in the NGINX Plus configuration (in **/etc/nginx/nginx.conf** by convention) has read permission on the JWK file. + + +## Testing + +In a browser, enter the address of your NGINX Plus instance and try to log in using the credentials of a user assigned to the application (see [Step 10 of _PingFederate or PingOne for Enterprise_](#ping-group-access)). + + + + +## Troubleshooting + +See the [**Troubleshooting**](https://github.com/nginxinc/nginx-openid-connect#troubleshooting) section at the nginx-openid-connect repository on GitHub. 
+ +### Revision History + +- Version 2 (March 2020) – Updates to _Configuring NGINX Plus_ section +- Version 1 (January 2020) – Initial version (NGINX Plus Release 20) diff --git a/content/nginx/directives.md b/content/nginx/directives.md new file mode 100644 index 000000000..650e77b4c --- /dev/null +++ b/content/nginx/directives.md @@ -0,0 +1,10 @@ +--- +_build: + list: always +docs: DOCS-469 +title: NGINX Directives Index +url: /nginx/directives +weight: 700 +--- + + \ No newline at end of file diff --git a/content/nginx/fips-compliance-nginx-plus.md b/content/nginx/fips-compliance-nginx-plus.md new file mode 100644 index 000000000..2cbd58317 --- /dev/null +++ b/content/nginx/fips-compliance-nginx-plus.md @@ -0,0 +1,196 @@ +--- +description: null +docs: DOCS-470 +doctypes: +- concept +title: NGINX Plus FIPS Compliance +toc: true +weight: 600 +--- + +When used with a FIPS 140-2 validated build of OpenSSL operating in FIPS mode, NGINX Plus is compliant with the requirements of FIPS 140-2 (Level 1) with respect to the decryption and encryption of SSL/TLS‑encrypted network traffic. + +## Introduction + +[FIPS 140-2](https://csrc.nist.gov/publications/detail/fips/140/2/final) is a United States Federal Standard that relates to the integrity and security of cryptographic modules. FIPS 140-2 Level 1 relates specifically to software cryptographic modules and makes stipulations about the cryptographic algorithms that may be used and the self‑tests that must be conducted to verify their integrity. 
+ +Several operating system vendors have obtained FIPS 140-2 Level 1 validation for the OpenSSL Cryptographic Module shipped with their respective operating systems: + +- [Canonical Ltd.: Ubuntu 18.04 OpenSSL Cryptographic Module](https://csrc.nist.gov/projects/cryptographic-module-validation-program/certificate/4540) +- [Oracle Corporation: Oracle OpenSSL FIPS Provider](https://csrc.nist.gov/projects/cryptographic-module-validation-program/certificate/4506) +- [Red Hat, Inc.: Red Hat Enterprise Linux 7 NSS Cryptographic Module](https://csrc.nist.gov/projects/cryptographic-module-validation-program/certificate/4498) +- [SUSE, LLC: SUSE Linux Enterprise Server Kernel Crypto API Cryptographic Module](https://csrc.nist.gov/projects/cryptographic-module-validation-program/certificate/4508) + +NGINX Plus uses the OpenSSL cryptographic module exclusively for all operations relating to the decryption and encryption of SSL/TLS and HTTP/2 traffic. + +When NGINX Plus is executed on an operating system where a FIPS‑validated OpenSSL cryptographic module is present and FIPS mode is enabled, NGINX Plus is compliant with FIPS 140-2 with respect to the decryption and encryption of SSL/TLS and HTTP/2 traffic. + +## Definition of Terms + +This statement uses the following terms: + +- **Cryptographic module**: The OpenSSL software, comprised of libraries of FIPS‑validated algorithms that can be used by other applications. + +- **Cryptographic boundary**: The operational functions that use FIPS‑validated algorithms. For NGINX Plus, the cryptographic boundary includes all functionality that is implemented by the [http_ssl](http://nginx.org/en/docs/http/ngx_http_ssl_module.html), [http_v2](http://nginx.org/en/docs/http/ngx_http_v2_module.html), [stream_ssl](http://nginx.org/en/docs/stream/ngx_stream_ssl_module.html), and [mail_ssl](http://nginx.org/en/docs/mail/ngx_mail_ssl_module.html) modules. 
These modules implement SSL and TLS operations for inbound and outbound connections which use HTTP, HTTP/2, TCP, and mail protocols. + +- **NGINX Plus**: The NGINX Plus software application developed by NGINX, Inc. and delivered in binary format from NGINX servers. + +- **FIPS mode**: When the operating system is configured to run in FIPS mode, the OpenSSL cryptographic module operates in a mode that has been validated to be in compliance with FIPS 140-2 Level 1. Most operating systems do not run in FIPS mode by default, so explicit configuration is necessary to enable FIPS mode. + +- **FIPS validated**: A component of the OpenSSL cryptographic module (the OpenSSL FIPS Object Module) is formally validated by an authorized certification laboratory. The validation holds if the module is built from source with no modifications to the source or build process. The implementation of FIPS mode that is present in operating system vendors’ distributions of OpenSSL contains this validated module. + +- **FIPS compliant**: NGINX Plus is compliant with FIPS 140-2 Level 1 within the cryptographic boundary when used with a FIPS‑validated OpenSSL cryptographic module on an operating system running in FIPS mode. + +## Verification of Correct Operation of NGINX Plus + +The following process describes how to deploy NGINX Plus in a FIPS‑compliant fashion and then verify that the FIPS operations are correctly performed. + +The process uses Red Hat Enterprise Linux (RHEL) version 7.4 as an example, and can be adapted for other Linux operating systems that can be configured in FIPS mode. + +### Step 1: Configure the Operating System to Use FIPS Mode + +For the purposes of the following demonstration, we installed and configured a RHEL 7.4 server. 
The [Red Hat FIPS documentation](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/security_guide/chap-federal_standards_and_regulations#sec-Enabling-FIPS-Mode) explains how to switch the operating system between FIPS mode and non‑FIPS mode by editing the boot options and restarting the system. + +For instructions for enabling FIPS mode on other FIPS‑compliant Linux operating systems, see the operating system documentation (for example, [Oracle Linux](https://docs.oracle.com/cd/E52668_01/E54670/html/ol7-fips-enable.html), [Ubuntu](https://ubuntu.com/security/certifications/docs/fips-faq)). + +### Step 2: Verify the Operating System and OpenSSL Are in FIPS Mode + +You can verify that the operating system is in FIPS mode and that the version of OpenSSL provided by the operating system vendor is FIPS‑compliant by using the following tests. + +**Check operating system flags**: When the operating system is in FIPS mode, ```crypto.fips_enabled``` is ```1```; otherwise, it is ```0```: + +```shell +sudo sysctl -a | grep fips +crypto.fips_enabled = 1 +``` + +**Determine whether OpenSSL can perform SHA1 hashes**: This test verifies the correct operation of OpenSSL. The SHA1 hash algorithm is permitted in all modes, so failure of this command indicates that the OpenSSL implementation does not work properly: + +```shell +openssl sha1 /dev/null +SHA1(/dev/null)= da39a3ee5e6b4b0d3255bfef95601890afd80709 +``` + +**Determine whether OpenSSL can perform MD5 hashes**: This test verifies that OpenSSL is running in FIPS mode. 
MD5 is not a permitted hash algorithm in FIPS mode, so an attempt to use it fails: + +```shell +openssl md5 /dev/null +Error setting digest md5 +140647163811744:error:060800A3:digital envelope routines:EVP_DigestInit _ex:disabled for fips:digest.c:251: +``` + +If OpenSSL is not running in FIPS mode, the MD5 hash functions normally: + +```shell +openssl md5 /dev/null +MD5(/dev/null)= d41d8cd98f00b204e9800998ecf8427e +``` + +### Step 3: Install NGINX Plus on the Operating System + +Follow the [F5 NGINX documentation](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-plus/) to install NGINX Plus on the host operating system, either directly from the [NGINX Plus repository](https://account.f5.com/myf5), or by downloading the **nginx-plus** package (**rpm** or **deb** package) onto another system and manually installing it on the host operating system. + +**Verify that NGINX Plus is correctly installed**: Run the following command to confirm that NGINX Plus is installed and is using the expected OpenSSL cryptographic module: + +```shell +nginx -V +nginx version: nginx/1.15.2 (nginx-plus-r16) +built by gcc 4.8.5 20150623 (Red Hat 4.8.5-16) (GCC) +built with OpenSSL 1.0.2k-fips 26 Jan 2017 +``` + +Observe that the version number of the OpenSSL library includes the `-fips` suffix. This indicates that the library is FIPS‑validated, but does not confirm that it is running in FIPS mode. 
+ +**Configure NGINX Plus to serve a simple SSL/TLS‑protected website**: Add the following simple configuration to NGINX Plus: + +```nginx +server { + listen 443 ssl; + + ssl_certificate /etc/nginx/ssl/test.crt; + ssl_certificate_key /etc/nginx/ssl/test.key; + + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + + location / { + root /usr/share/nginx/html; + index index.html index.htm; + } +} +``` + +If necessary, you can generate a self‑signed certificate for test purposes: + +```shell +mkdir -p /etc/nginx/ssl +openssl req -newkey rsa:2048 -nodes -keyout /etc/nginx/ssl/test.key -x509 -days 365 -out /etc/nginx/ssl/test.crt +``` + +Verify that you can access the website using HTTPS from a remote host. Connect to the NGINX IP address using the `openssl s_client` command, and enter the HTTP message `GET /`: + +```shell +(echo "GET /" ; sleep 1) | openssl s_client -connect :443 +``` + +Use `openssl s_client` for this test because it unambiguously confirms which SSL/TLS cipher was negotiated in the connection. After some debugging information (including the cipher selected), the body of the default “Welcome to nginx!” greeting page is displayed. + +### Step 4: Verify Compliance with FIPS 140-2 + +FIPS 140-2 disallows the use of some cryptographic algorithms, including the Camellia block cipher. We can test compliance with FIPS 140-2 by issuing SSL/TLS requests with known ciphers on another (non-FIPS-mode) server: + +#### RC4-MD5 + +```shell +(echo "GET /" ; sleep 1) | openssl s_client -connect :443 -cipher RC4-MD5 +``` + +This cipher is insecure and is disabled by NGINX Plus by default. The SSL handshake always fails. + +#### CAMELLIA-SHA + +```shell +(echo "GET /" ; sleep 1) | openssl s_client -connect :443 -cipher CAMELLIA256-SHA +``` + +This cipher is considered secure but is not permitted by the FIPS standard. The SSL handshake fails if the target system is compliant with FIPS 140-2, and succeeds otherwise. 
+ +Note that if you attempt to issue the client request on a host running in FIPS mode, it fails because the OpenSSL client cannot use this cipher. + +#### AES256-SHA + +```shell +(echo "GET /" ; sleep 1) | openssl s_client -connect :443 -cipher AES256-SHA +``` + +This cipher is considered secure by NGINX Plus and is permitted by FIPS 140-2. The SSL handshake succeeds. + +## Which Ciphers Are Disabled in FIPS Mode? + +The FIPS 140-2 standard only permits a [subset of the typical SSL and TLS ciphers](https://csrc.nist.gov/csrc/media/publications/fips/140/2/final/documents/fips1402annexa.pdf). + +In the following test, the ciphers presented by NGINX Plus are surveyed using the [Qualys SSL server test](https://www.ssllabs.com/ssltest). In its default configuration, with the `ssl_ciphers HIGH:!aNULL:!MD5` directive, NGINX Plus presents the following ciphers to SSL/TLS clients: + +Ciphers presented by NGINX Plus to clients when in non-FIPS mode + +When FIPS mode is enabled on the host operating system, the two ciphers that use the Camellia block cipher (`TLS_RSA_WITH_CAMELLIA_128_CBC_SHA` and `TLS_RSA_WITH_CAMELLIA_256_CBC_SHA`) are removed: + +Ciphers presented by NGINX Plus to clients when in FIPS mode + +When you configure NGINX Plus with the `ssl_ciphers ALL` directive, NGINX Plus presents all the relevant ciphers available in the OpenSSL cryptographic module to the client. FIPS mode disables the following ciphers: + +- `TLS_ECDH_anon_WITH_RC4_128_SHA` +- `TLS_ECDHE_RSA_WITH_RC4_128_SHA` +- `TLS_RSA_WITH_CAMELLIA_128_CBC_SHA` +- `TLS_RSA_WITH_CAMELLIA_256_CBC_SHA` +- `TLS_RSA_WITH_IDEA_CBC_SHA` +- `TLS_RSA_WITH_RC4_128_MD5` +- `TLS_RSA_WITH_RC4_128_SHA` +- `TLS_RSA_WITH_SEED_CBC_SHA` + +## Conclusion + +NGINX Plus can be used to decrypt and encrypt SSL/TLS‑encrypted network traffic in deployments that require FIPS 140-2 Level 1 compliance. 
+ +The process described above may be used to verify that NGINX Plus is operating in conformance with the FIPS 140-2 Level 1 standard. + + diff --git a/content/nginx/open-source-components.md b/content/nginx/open-source-components.md new file mode 100644 index 000000000..0c4a6035d --- /dev/null +++ b/content/nginx/open-source-components.md @@ -0,0 +1,324 @@ +--- +description: License information for open source components included in the NGINX + Plus software. +docs: DOCS-471 +doctypes: +- reference +title: Open Source Components +toc: true +weight: 500 +--- + +Open source components included in the F5 NGINX Plus (package name is `nginx-plus`) are: + + +- nginx/OSS 1.27.2, distributed under 2-clause BSD license. + + Homepage: + + Copyright © 2002-2021 Igor Sysoev + + Copyright © 2011-2024 NGINX, Inc. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +- Internal MD5 implementation based on Alexander Peslyak's public domain implementation: + + This is an OpenSSL-compatible implementation of the RSA Data Security, Inc. MD5 Message-Digest Algorithm (RFC 1321). + + Homepage: + + Author: Alexander Peslyak, better known as Solar Designer + + This software was written by Alexander Peslyak in 2001. No copyright is + claimed, and the software is hereby placed in the public domain. + In case this attempt to disclaim copyright and place the software in the + public domain is deemed null and void, then the software is + Copyright © 2001 Alexander Peslyak and it is hereby released to the + general public under the following terms: + - Redistribution and use in source and binary forms, with or without +modification, are permitted. + + - There's ABSOLUTELY NO WARRANTY, express or implied. + + (This is a heavily cut-down "BSD license".) + +- MurmurHash algorithm (version 2), distributed under MIT license. 
+ + Homepage: + + Copyright © Austin Appleby + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation files + (the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of the Software, + and to permit persons to whom the Software is furnished to do so, + subject to the following conditions: + - The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +- Components used in status monitoring dashboard v2 only +(`dashboard.html` in `nginx-plus` package) and distributed under MIT license: + + + - `@babel-core`, Babel compiler core (7.23.2) + + Homepage: + + Copyright © 2014-present Sebastian McKenzie and other contributors + + - `@babel/plugin-proposal-object-rest-spread`, produces spec-compliant code by using Babel's objectSpread helper (7.22.15). + + Homepage: + + - `@babel/plugin-transform-runtime`, makes helpers reference the module babel-runtime to avoid duplication across your compiled output (7.22.15). + + Homepage: + + - `@babel/preset-env`, a Babel preset for each environment (7.22.15). + + Homepage: + + - `@babel/preset-react`, a Babel preset for all React plugins (7.22.15). 
+ + Homepage: + + - `autoprefixer`, a PostCSS plugin to parse CSS and add vendor prefixes to CSS rules (10.4.7) + + Homepage: + + Copyright © 2013 Andrey Sitnik + + - `babel-loader`, allows transpiling JavaScript files using Babel and webpack (9.1.3). + + Homepage: + + Copyright © 2014-2019 Luís Couto + + + - `babel-plugin-istanbul`, a babel plugin that adds istanbul instrumentation to ES6 code (6.1.1). + + Homepage: + + Copyright © 2016, Istanbul Code Coverage + + - `core-js`, a modular standard library for JavaScript (3.22.8). + + Homepage: + + Copyright © 2014-2022 Denis Pushkarev + + - `css-loader`, interprets `@import` and `url()` +like `import/require()` and will resolve them (6.8.1). + + Homepage: + + Copyright © JS Foundation and other contributors + + - `cssnano`, a modular minifier, built on top of the PostCSS ecosystem (5.1.11). + + Homepage: + + Copyright © Ben Briggs + + - `eslint`, a tool for identifying and reporting on patterns found in ECMAScript/JavaScript code (8.49.0). + + Homepage: + + Copyright OpenJS Foundation and other contributors, + + - `eslint-config-airbnb`, exports some ESLint configurations (19.0.4). + + Homepage: + + Copyright © 2012 Airbnb + + - `eslint-plugin-import`, supports linting of ES2015+ (ES6+) import/export syntax and prevent issues with misspelling of file paths and import names (2.28.1). + + Homepage: + + Copyright © 2015 Ben Mosher + + - `eslint-plugin-jsx-a11y`, static AST checker for accessibility rules on JSX elements (6.5.1). + + Homepage: + + Copyright © 2016 Ethan Cohen + + - `eslint-plugin-react`, react specific linting rules for eslint (7.33.2). + + Homepage: + + Copyright © 2014 Yannick Croissant + + - `eslint-webpack-plugin`, uses eslint to find and fix problems in the JavaScript code (4.0.1). + + Homepage: + + Copyright JS Foundation and other contributors + + - `history`, manage session history with JavaScript (4.10.1). 
+ + Homepage: + + Copyright © React Training 2016-2020, Copyright © Remix Software 2020-2021 + + - `html-inline-css-webpack-plugin`, converts external stylesheet to embedded stylesheet, aka document stylesheet (1.11.1). + + Homepage: + + Copyright © 2018 Huang + + - `html-webpack-plugin`, simplifies creation of HTML files to serve your webpack bundles (5.5.0). + + Homepage: + + Copyright © JS Foundation and other contributors + + - `mini-css-extract-plugin`, extracts CSS into separate files (2.6.0). + + Homepage: + + Copyright © JS Foundation and other contributors + + - `postcss`, a tool for transforming styles with JS plugins (8.4.31). + + Homepage: + + Copyright 2013 Andrey Sitnik <> + + - `postcss-loader`, PostCSS loader for webpack (7.3.3). + + Homepage: + + Copyright © JS Foundation and other contributors + + - `postcss-url`, PostCSS plugin to rebase url(), inline or copy asset (10.1.3). + + Homepage: + + Copyright © 2014 Maxime Thirouin + + - `preact`, fast 3kb React alternative with the same ES6 API (10.7.3). + + Homepage: + + Copyright © 2015-present Jason Miller + + - `react-dev-utils`, utilities used by Create React App (12.0.1). + + Homepage: + + Copyright © 2013-present, Facebook, Inc. + + - `regenerator-runtime`, standalone runtime for Regenerator-compiled generator and async functions (0.13.9). + + Homepage: + + Copyright © 2014-present, Facebook, Inc. + + - `style-loader`, injects CSS into the DOM (3.3.1). + + Homepage: + + Copyright © JS Foundation and other contributors + + - `webpack`, a bundler for javascript and friends (5.88.2). + + Homepage: + + Copyright © JS Foundation and other contributors + + - `webpack-cli`, provides the interface of options webpack uses in its configuration file (4.10.0). + + Homepage: + + Copyright © JS Foundation and other contributors + + - `whatwg-fetch`, a window.fetch JavaScript polyfill (2.0.4). + + Homepage: + + Copyright © 2014-2016 GitHub, Inc. 
+ + The MIT License + + Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + - The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +- Components used in status monitoring dashboard v2 only (`dashboard.html` in `nginx-plus` package) and distributed under 3-clause BSD license and Apache 2.0 license: + + - `babel-plugin-react-css-modules`, transforms styleName to className using compile time CSS module resolution (3.4.2), distributed under 3-clause BSD license. + + Homepage: + + Copyright © 2016, Gajus Kuizinas () + + All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
+ - Neither the name of the Gajus Kuizinas () nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANUARY BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + - `npm-font-open-sans`, Open Sans font family - incl. usage of CSS, SCSS, LESS (1.1.0), distributed under Apache 2.0 license. + + Homepage: + + Copyright © Steve Matteson + + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +Optional add-on and third-party modules provided with NGINX Plus may include +additional open-source components. The licenses for these components are included in the installation package +for each module. 
diff --git a/content/nginx/releases.md b/content/nginx/releases.md new file mode 100644 index 000000000..51a3ef6e2 --- /dev/null +++ b/content/nginx/releases.md @@ -0,0 +1,1757 @@ +--- +description: Release information for F5 NGINX Plus, a complete application delivery platform, + including new features and a list of supported platforms. +docs: DOCS-472 +doctypes: +- concept +title: Releases +toc: true +weight: 300 +--- + + +## Support for Current and Previous Releases + +NGINX provides technical support for F5 NGINX Plus releases for 24 months from the initial date of each release. +With each new NGINX Plus release, the previously released version enters End of Software Development (EoSD). +We do not issue updates for releases that have reached EoSD. For this reason, we advise customers to run the most recent release. +The initial release dates for NGINX Plus are noted in this document. +New releases are announced on the [NGINX Product Support Announcements](https://interact.f5.com/Customer-Preference-Center.html) mailing list. + + +## NGINX Plus Release 33 (R33) +_19 November 2024_
      +_Based on NGINX Open Source 1.27.2_ + +NGINX Plus R33 is a feature release: + +- Licensing: Each NGINX Plus instance now requires a JWT license file. The JWT must be obtained from [MyF5](https://account.f5.com/myf5) and is expected to be located at `/etc/nginx/` for Linux or +`/usr/local/etc/nginx/` for FreeBSD or at the path specified by the [`license_token`](https://nginx.org/en/docs/ngx_mgmt_module.html#license_token) in the [`mgmt`](https://nginx.org/en/docs/ngx_mgmt_module.html) context. + +- NGINX usage reporting: Usage report is sent to F5 licensing endpoint [every hour](https://nginx.org/en/docs/ngx_mgmt_module.html#usage_report) using the [secure](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_verify) connection. The initial usage report should be sent once NGINX Plus starts after installation or upgrade to R33. If the initial usage report is not received by the endpoint, NGINX Plus will stop processing traffic. A 180-day grace period can be [enabled](https://nginx.org/en/docs/ngx_mgmt_module.html#enforce_initial_report) to submit the initial usage report. + + Optionally, for network-restricted environments, reporting can be [configured](https://nginx.org/en/docs/ngx_mgmt_module.html#usage_report) to [NGINX Instance Manager]({{< relref "nim/index.md" >}}) from which the report can be sent to F5 licensing endpoint. + + For more information about licensing and usage reporting, see [About subscription licenses]({{< relref "solutions/about-subscription-licenses.md" >}}) article and [`ngx_mgmt_module`](https://nginx.org/en/docs/ngx_mgmt_module.html) module documentation. + +- OCSP stapling support and client certificate validation with OCSP +in the [stream](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html) module with the +[`ssl_ocsp`](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_ocsp) and [`ssl_ocsp_responder`](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_ocsp_responder) directives. 
+ +- SSL key logging with the +`ssl_key_log` directive for [http](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_key_log), [stream](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_key_log), +[proxy](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_key_log), [grpc](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_key_log), [uwsgi](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_key_log) that allows logging SSL keys created during client and upstream connections to the file. +The argument is a file name in the `SSLKEYLOGFILE` format compatible with Wireshark. + +- SSL Certificate Caching: Fixed loading of trusted CA bundles containing entries with duplicate Distinguished Name (DN). + +- Change: the [`ssl_client_certificate`](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_client_certificate) directive is not required for client SSL certificates verification. + +- Response trailers support in proxy with the [`proxy_pass_trailers`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass_trailers) directive that allows passing trailer fields from a proxied server to a client. + +- The [NGINX JavaScript]({{< relref "nginx/admin-guide/dynamic-modules/nginscript.md" >}}) module was updated to version [0.8.7](https://nginx.org/en/docs/njs/changes.html#njs0.8.7), featuring QuickJS [runtime support](https://nginx.org/en/docs/njs/engine.html). + +- Added initial support for Post Quantum Cryptography. 
+ +NGINX Plus R33 is supported on: + +- AlmaLinux 8, 9 +- Alpine Linux 3.17, 3.18, 3.19, 3.20 +- Amazon Linux 2 LTS, 2023 +- Debian 11, 12 +- FreeBSD 13, 14 +- Oracle Linux 8.1+, 9 +- RHEL 8.1+, 9.0+ +- Rocky Linux 8, 9 +- SUSE Linux Enterprise Server 12, 15 SP5+ +- Ubuntu 20.04 LTS, 22.04 LTS, 24.04 LTS + +**Notes:** + +- Alpine Linux 3.16 is removed +- Alpine Linux 3.17 is deprecated +- Alpine Linux 3.20 is new in this release +- CentOS 7.4+ is removed +- RHEL 7.4+ is removed +- Oracle Linux 7.4+ is removed +- SUSE Linux Enterprise Server 12 is deprecated +- support for s390x architecture removed +- the [Lua]({{< relref "nginx/admin-guide/dynamic-modules/lua.md" >}}) module is no longer available for SUSE Linux Enterprise Server 12 + +More information: [Announcing NGINX Plus R33](https://community.f5.com/kb/technicalarticles/announcing-nginx-plus-r33-release/336403) + + +### NGINX Plus R33 Update + +This is a bugfix release for NGINX Plus R33. + +NGINX Plus R33 P1
      +_4 December 2024_ + +- Resolved an issue related to product code detection on Azure Marketplace VMs. + + + +## NGINX Plus Release 32 (R32) +_29 May 2024_
      +_Based on NGINX Open Source 1.25.5_ + +NGINX Plus R32 is a feature release: + +- SSL certificate caching that improves the NGINX startup time and memory usage in cases of configurations with large number of locations with relatively small number of unique certificate/key pairs + +- The [`stream_pass`](https://nginx.org/en/docs/stream/ngx_stream_pass_module.html) module that allows passing the accepted connection directly to any configured listening socket in `http`, `stream`, `mail`, and other similar modules + +- NGINX Plus [official container images](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-docker/) + +- [Virtual servers](http://nginx.org/en/docs/stream/ngx_stream_core_module.html#server_name) in the [`stream`](http://nginx.org/en/docs/stream/ngx_stream_core_module.html) module + +- The `deferred`, `accept_filter`, and `setfib` parameters of the [listen](http://nginx.org/en/docs/stream/ngx_stream_core_module.html#listen) directive in the [`stream`](http://nginx.org/en/docs/stream/ngx_stream_core_module.html) module + +- Cache line size detection for some architectures + +- Security fixes: + + - Heap Overflow w/ write ([CVE-2024-32760](https://my.f5.com/manage/s/article/K000139609)): Undisclosed HTTP/3 encoder instructions can cause NGINX worker processes to terminate or cause other possible impacts + + - Stack Overflow / Use after free ([CVE-2024-31079](https://my.f5.com/manage/s/article/K000139611)): Undisclosed HTTP/3 requests can cause NGINX worker processes to terminate or cause other possible impacts. 
This attack requires that a request be specifically timed during the connection draining process, which the attacker has no visibility and limited influence over
+
+ - Null Pointer Dereference w/ Empty Header ([CVE-2024-35200](https://my.f5.com/manage/s/article/K000139612)): Undisclosed HTTP/3 requests can cause NGINX worker processes to terminate or cause other possible impacts
+
+ - Memory Disclosure during QUIC handshake ([CVE-2024-34161](https://my.f5.com/manage/s/article/K000139627)): When the network infrastructure supports a Maximum Transmission Unit (MTU) of 4096 or greater without fragmentation, undisclosed QUIC messages can cause NGINX worker processes to terminate or cause leakage of previously freed memory
+
+- Bugfixes:
+
+ - in the [MQTT Filter](https://nginx.org/en/docs/stream/ngx_stream_mqtt_filter_module.html) module: malformed packets when using default properties
+
+ - in the [zone_sync](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html) module: memory leak on configuration reload
+
+ - Unexpected connection closure while using 0-RTT in QUIC
+
+ - Connections with pending AIO operations might be closed prematurely during graceful shutdown of old worker processes
+
+ - Socket leak alerts no longer logged when fast shutdown was requested after graceful shutdown of old worker processes
+
+ - A socket descriptor error, a socket leak, or a segmentation fault in a worker process (for SSL proxying) might occur if AIO was used in a subrequest
+
+ - A segmentation fault might occur in a worker process if SSL proxying was used along with the [image_filter](https://nginx.org/en/docs/http/ngx_http_image_filter_module.html) directive and errors with code 415 were redirected with the [error_page](https://nginx.org/en/docs/http/ngx_http_core_module.html#error_page) directive
+
+ - Bugfixes and improvements in HTTP/3
+
+- New features and bugfixes in njs:
+
+ - setting the `Server` header for outgoing headers
+
+ - QuickJS engine support in CLI
+ 
+NGINX Plus R32 is supported on: + +- AlmaLinux 8, 9 +- Alpine Linux 3.16, 3.17, 3.18, 3.19 +- Amazon Linux 2 LTS, 2023 +- CentOS 7.4+ +- Debian 11, 12 +- FreeBSD 13, 14 +- Oracle Linux 7.4+, 8.1+, 9 +- RHEL 7.4+, 8.1+, 9.0+ +- Rocky Linux 8, 9 +- SUSE Linux Enterprise Server 12 SP5, 15 SP2 +- Ubuntu 20.04 LTS, 22.04 LTS, 24.04 LTS + +**Notes:** + +- Ubuntu 24.04 LTS is new in this release +- CentOS 7 is deprecated +- RHEL 7 is deprecated +- Oracle Linux 7 is deprecated +- FreeBSD 12 is removed +- [OpenTracing dynamic module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/opentracing/) (package name is `nginx-plus-module-opentracing-module`) is deprecated +- [ModSecurity WAF dynamic module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/nginx-waf/) (package name is `nginx-plus-module-modsecurity`) reached end of support and is no longer available + +More information: [Announcing NGINX Plus R32](https://www.f5.com/company/blog/nginx/announcing-NGINX-plus-R32) + + + +### NGINX Plus R32 Update + +This is a security release for NGINX Plus R32. + +NGINX Plus R32 P1
      
+_14 August 2024_
+
+- Security:
+
+ - In the [MQTT Filter](https://nginx.org/en/docs/stream/ngx_stream_mqtt_filter_module.html) module, undisclosed requests can cause an increase in memory resource utilization ([CVE-2024-39792](https://my.f5.com/manage/s/article/K000140108))
+
+ - In the [MP4](https://nginx.org/en/docs/http/ngx_http_mp4_module.html) module, a specially crafted `mp4` file can cause NGINX worker memory over-read resulting in its termination ([CVE-2024-7347](https://my.f5.com/manage/s/article/K000140529))
+
+- Various fixes in SSL certificate caching
+
+
+
+## NGINX Plus Release 31 (R31)
+_19 December 2023_
      +_Based on NGINX Open Source 1.25.3_ + +NGINX Plus R31 is a feature release: + +- [Native usage reporting](https://nginx.org/en/docs/ngx_mgmt_module.html) +of NGINX Plus installations to [NGINX Instance Manager](https://docs.nginx.com/nginx-management-suite/nim/) + +- The [$upstream_last_server_name](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#var_upstream_last_server_name) variable that keeps the name of the last selected upstream server and allows passing it to the proxied server through SNI + +- Notable startup speedup when using a large number of locations + +- [HTTP/3 and QUIC](https://nginx.org/en/docs/http/ngx_http_v3_module.html) features and bugfixes: + + - Path MTU Discovery (PMTUD) feature + + - support for `TLS_AES_128_CCM_SHA256` cipher suite + + - support for [`server_tokens`](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_tokens) with variables + + - bugfixes and improvements + +- New features in njs: + + - the `js_periodic` directive for [http](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_periodic) and [stream](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_periodic) that allows specifying a JS handler to run at regular intervals + + - the `Console` object: +[`error()`](https://nginx.org/en/docs/njs/reference.html#console_error), +[`info()`](https://nginx.org/en/docs/njs/reference.html#console_info), +[`log()`](https://nginx.org/en/docs/njs/reference.html#console_log), +[`time()`](https://nginx.org/en/docs/njs/reference.html#console_time), +[`timeEnd()`](https://nginx.org/en/docs/njs/reference.html#console_time_end), +[`warn()`](https://nginx.org/en/docs/njs/reference.html#console_warn) methods + + - the [`fs()`](https://nginx.org/en/docs/njs/reference.html#njs_api_fs) module: the [`fs.existsSync()`](https://nginx.org/en/docs/njs/reference.html#fs_existssync) method + + - [shared dictionary](https://nginx.org/en/docs/njs/reference.html#ngx_shared): the 
[`items()`](https://nginx.org/en/docs/njs/reference.html#dict_items) method
+
+- MQTT bugfixes and improvements:
+
+ - the `CONNECT` message was rejected when a password was not provided
+
+ - the `CONNECT` message parsing is stopped when the message length is less than the number of bytes received
+
+ - added the `Will` topic and `Will` payload for MQTT Version 3.1.1 if the `CONNECT` message is rewritten
+
+- Various bugfixes and improvements:
+
+ - the `Status` response header line with an empty reason phrase from the backend was handled incorrectly
+
+ - memory leak during reconfiguration when using the PCRE2 library
+
+ - improved detection of misbehaving clients when using HTTP/2
+
+- The [OpenTracing](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/opentracing/) module introduced in NGINX Plus [R18](#r18) is deprecated, it is recommended to use the [OpenTelemetry Distributed Tracing](https://nginx.org/en/docs/ngx_otel_module.html) module that incorporates all the features of the OpenTracing module.
+
+NGINX Plus R31 is supported on:
+
+- AlmaLinux 8, 9
+- Alpine Linux 3.16, 3.17, 3.18, 3.19
+- Amazon Linux 2 LTS, 2023
+- CentOS 7.4+
+- Debian 11, 12
+- FreeBSD 12.1+, 13, 14
+- Oracle Linux 7.4+, 8.1+, 9
+- RHEL 7.4+, 8.1+, 9.0+
+- Rocky Linux 8, 9
+- SUSE Linux Enterprise Server 12 SP5, 15 SP2
+- Ubuntu 20.04 LTS, 22.04 LTS
+
+**Notes:**
+
+- Alpine Linux 3.19 is new in this release
+- FreeBSD 14 is new in this release
+- Alpine Linux 3.15 is removed
+- FreeBSD 12 is deprecated
+- OpenTracing dynamic module (package name is `nginx-plus-module-opentracing-module`) is deprecated
+
+More information: [Announcing NGINX Plus R31](https://www.nginx.com/blog/nginx-plus-r31-released/)
+
+
+### NGINX Plus R31 Update
+
+This is an improvement release for NGINX Plus R31.
+
+NGINX Plus R31 P1
      +_14 February 2024_ + +- Security: a segmentation fault might occur in a worker process if HTTP/3 was used ([CVE-2024-24989](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-24989), [CVE-2024-24990](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-24990)) + +- Management module: fixed a potential crash that might happen while using a system resolver + +More information: [Updating NGINX for the Vulnerabilities in the HTTP/3 Module](https://www.nginx.com/blog/updating-nginx-for-the-vulnerabilities-in-the-http-3-module/) + + +NGINX Plus R31 P2
      +_29 May 2024_ + +- Security: + + - Heap Overflow w/ write ([CVE-2024-32760](https://my.f5.com/manage/s/article/K000139609)): Undisclosed HTTP/3 encoder instructions can cause NGINX worker processes to terminate or cause other possible impacts + + - Stack Overflow / Use after free ([CVE-2024-31079](https://my.f5.com/manage/s/article/K000139611)): Undisclosed HTTP/3 requests can cause NGINX worker processes to terminate or cause other possible impacts. This attack requires that a request be specifically timed during the connection draining process, which the attacker has no visibility and limited influence over + + - Null Pointer Dereference w/ Empty Header ([CVE-2024-35200](https://my.f5.com/manage/s/article/K000139612)): Undisclosed HTTP/3 requests can cause NGINX worker processes to terminate or cause other possible impacts + + - Memory Disclosure during QUIC handshake ([CVE-2024-34161](https://my.f5.com/manage/s/article/K000139627)): When the network infrastructure supports a Maximum Transmission Unit (MTU) of 4096 or greater without fragmentation, undisclosed QUIC messages can cause NGINX worker processes to terminate or cause leakage of previously freed memory + + +NGINX Plus R31 P3
      
+_14 August 2024_
+
+- Security:
+
+ - In the [MQTT Filter](https://nginx.org/en/docs/stream/ngx_stream_mqtt_filter_module.html) module, undisclosed requests can cause an increase in memory resource utilization ([CVE-2024-39792](https://my.f5.com/manage/s/article/K000140108))
+
+ - In the [MP4](https://nginx.org/en/docs/http/ngx_http_mp4_module.html) module, a specially crafted `mp4` file can cause NGINX worker memory over-read resulting in its termination ([CVE-2024-7347](https://my.f5.com/manage/s/article/K000140529))
+
+
+
+
+## NGINX Plus Release 30 (R30)
+_15 August 2023_
      +_Based on NGINX Open Source 1.25.1_ + +NGINX Plus R30 is a feature release: + +- Native support for [HTTP/3 and QUIC](https://nginx.org/en/docs/http/ngx_http_v3_module.html) + +- Version [`9`](https://nginx.org/en/docs/http/ngx_http_api_module.html#compatibility) of the [API](https://nginx.org/en/docs/http/ngx_http_api_module.html): + + - Per-worker connection statistics including accepted, dropped, active and idle connections, total and current requests + +- The [Prometheus-njs module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/prometheus-njs/) now supports version [`9`](https://nginx.org/en/docs/http/ngx_http_api_module.html) of the [API](https://nginx.org/en/docs/http/ngx_http_api_module.html) + +- DNS reload optimization: now DNS name expiry time for dynamically-resolved upstream hosts is preserved across reloads + +- The new [`mqtt_buffers`](https://nginx.org/en/docs/stream/ngx_stream_mqtt_filter_module.html#mqtt_buffers) directive in the [MQTT Filter](https://nginx.org/en/docs/stream/ngx_stream_mqtt_filter_module.html) module that specifies the number of buffers allocated per connection, the directive also supersedes the [`mqtt_rewrite_buffer_size`](https://nginx.org/en/docs/stream/ngx_stream_mqtt_filter_module.html#mqtt_buffer_size) directive + +- The [`ssl`](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl) directive deprecated in NGINX Plus Release 16 was removed, the `ssl` parameter of the [`listen`](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen) directive should be used instead + +- The new [`http2`](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2) directive +obsoletes the `http2` parameter of the [`listen`](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen) directive which is now deprecated + +- HTTP/2 server push removed, the [`http2_push`](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_push), 
[`http2_push_preload`](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_push_preload), [`http2_max_concurrent_pushes`](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_concurrent_pushes) directives are made obsolete
+
+- Optional NGINX diagnostic scripts that collect the data
+required for troubleshooting are available as a separate download package
+
+- New features in [njs](http://nginx.org/en/docs/njs):
+
+ - global NGINX properties: [`ngx.build`](https://nginx.org/en/docs/njs/reference.html#ngx_build), [`ngx.conf_file_path`](https://nginx.org/en/docs/njs/reference.html#ngx_conf_file_path), [`ngx.error_log_path`](https://nginx.org/en/docs/njs/reference.html#ngx_error_log_path), [`ngx.prefix`](https://nginx.org/en/docs/njs/reference.html#ngx_prefix), [`ngx.version`](https://nginx.org/en/docs/njs/reference.html#ngx_version), [`ngx.version_number`](https://nginx.org/en/docs/njs/reference.html#ngx_version_number), [`ngx.worker_id`](https://nginx.org/en/docs/njs/reference.html#ngx_worker_id)
+
+ - the `js_shared_dict_zone` directive for [http](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_shared_dict_zone) and [stream](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_shared_dict_zone) that allows declaring a dictionary shared between worker processes
+
+ - [ES13-compliant](https://nginx.org/en/docs/njs/compatibility.html) `Array` methods: `from()`, `toSorted()`, `toSpliced()`, `toReversed()`
+
+ - [`CryptoKey`](https://nginx.org/en/docs/njs/reference.html#cryptokey) properties in `WebCrypto` API: [`algorithm`](https://nginx.org/en/docs/njs/reference.html#cryptokey_alg), [`extractable`](https://nginx.org/en/docs/njs/reference.html#cryptokey_extractable), [`type`](https://nginx.org/en/docs/njs/reference.html#cryptokey_type), [`usages`](https://nginx.org/en/docs/njs/reference.html#cryptokey_usages)
+
+- The GeoIP2 module is no longer available for Amazon Linux 2 as the EPEL repository doesn't provide the 
`libmaxminddb` library required to build the module + +NGINX Plus R30 is supported on: + +- AlmaLinux 8, 9 +- Alpine Linux 3.16, 3.17, 3.18 +- Amazon Linux 2 LTS, 2023 +- CentOS 7.4+ +- Debian 11, 12 +- FreeBSD 12.1+, 13 +- Oracle Linux 7.4+, 8.1+, 9 +- RHEL 7.4+, 8.1+, 9.0+ +- Rocky Linux 8, 9 +- SUSE Linux Enterprise Server 12 SP5, 15 SP2 +- Ubuntu 20.04 LTS, 22.04 LTS + +**Notes:** + +- Alpine Linux 3.18 is new in this release +- Debian 12 is new in this release +- Alpine Linux 3.15 is deprecated +- Alpine Linux 3.14 is removed +- Ubuntu 18.04 is removed +- The GeoIP2 dynamic module (package name is `nginx-plus-module-geoip2`) for Amazon Linux 2 is no longer provided + +More information: [Announcing NGINX Plus R30](https://www.nginx.com/blog/nginx-plus-r30-released/) + + + +### NGINX Plus R30 Update + +This is an improvement release for NGINX Plus R30. + +NGINX Plus R30 P1
      +_11 October 2023_ + +- Additional protection against HTTP/2 Rapid Reset Attack vulnerability ([CVE-2023-44487](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-44487)) that may affect NGINX only when it is configured with the [keepalive requests](https://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_requests) value substantially higher than the default value. Limitations in HTTP/2 protocol allow clients to produce a higher RPS rate than expected from a configured HTTP/2 [max concurrent streams](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_concurrent_streams) setting which can be exploited to trigger a Denial-of-Service attack. + +More information: [HTTP/2 Rapid Reset Attack Impacting NGINX Products](https://www.nginx.com/blog/http-2-rapid-reset-attack-impacting-f5-nginx-products/) + + + +NGINX Plus R30 P2
      +_14 February 2024_ + +- Security: a segmentation fault might occur in a worker process if HTTP/3 was used ([CVE-2024-24990](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-24990)) + +More information: [Updating NGINX for the Vulnerabilities in the HTTP/3 Module](https://www.nginx.com/blog/updating-nginx-for-the-vulnerabilities-in-the-http-3-module/) + + + +## NGINX Plus Release 29 (R29) +_02 May 2023_
      +_Based on NGINX Open Source 1.23.4_ + +NGINX Plus R29 is a feature release: + +- MQTT messaging protocol support with the [MQTT Preread](https://nginx.org/en/docs/stream/ngx_stream_mqtt_preread_module.html) and [MQTT Filter](https://nginx.org/en/docs/stream/ngx_stream_mqtt_filter_module.html) modules + +- [SAML Authentication reference implementation](https://github.com/nginxinc/nginx-saml) based on native njs [XML support](http://nginx.org/en/docs/njs/reference.html#xml) + +- OpenTelemetry Distributed Tracing [module](https://nginx.org/en/docs/ngx_otel_module.html), distributed in NGINX Plus packages (package name is `nginx-plus-module-otel`) and is available as a [dynamic module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/opentelemetry/) + +- Experimental support for [HTTP/3 and QUIC](https://nginx.org/en/docs/http/ngx_http_v3_module.html), distributed in NGINX Plus packages (package name is `nginx-plus-quic`) + +- TLS 1.3 is enabled by default (the `TLSv1.3` parameter of the [ssl_protocols](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols) directive) + +- The [internal_redirect](https://nginx.org/en/docs/http/ngx_http_internal_redirect_module.html#internal_redirect) directive and module that allows internal redirects after checking [request](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html) and [connection](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html) processing limits, and [access](https://nginx.org/en/docs/http/ngx_http_access_module.html) limits + +- New feature in [OpenID Connect reference implementation](https://github.com/nginxinc/nginx-openid-connect): support for access token + +- The [Prometheus-njs module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/prometheus-njs/) now supports version [`8`](https://nginx.org/en/docs/http/ngx_http_api_module.html#compatibility) of the [API](https://nginx.org/en/docs/http/ngx_http_api_module.html), including SSL extended statistics 
for each HTTP [upstream](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_upstream) and stream [upstream](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_stream_upstream), SSL extended statistics for each HTTP [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_server_zone) and stream [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_stream_server_zone), and extended statistics for [SSL](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_ssl_object) + +- The NGINX JavaScript ([njs](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/nginscript/)) module for NGINX Plus was updated to version [0.7.12](http://nginx.org/en/docs/njs/changes.html#njs0.7.12), featuring extended [Fetch API](https://nginx.org/en/docs/njs/reference.html#ngx_fetch) and [WebCrypto API](http://nginx.org/en/docs/njs/reference.html#builtin_crypto), [XML module](https://nginx.org/en/docs/njs/reference.html#xml_node) to parse and modify XML documents, [Zlib module](https://nginx.org/en/docs/njs/reference.html#zlib) to support compression + +NGINX Plus R29 is supported on: + +- AlmaLinux 8, 9 +- Alpine Linux 3.15, 3.16, 3.17 +- Amazon Linux 2 LTS, 2023 +- CentOS 7.4+ +- Debian 11 +- FreeBSD 12.1+, 13 +- Oracle Linux 7.4+, 8.1+, 9 +- RHEL 7.4+, 8.1+, 9.0+ +- Rocky Linux 8, 9 +- SUSE Linux Enterprise Server 12 SP5, 15 SP2 +- Ubuntu 20.04 LTS, 22.04 LTS + +**Notes:** + +- Amazon Linux 2023 is new in this release +- Alpine Linux 3.14 is deprecated +- Ubuntu 18.04 is deprecated +- Alpine Linux 3.13 is removed +- The ModSecurity dynamic module (package name is `nginx-plus-module-modsecurity`) is no longer supported + +More information: [Announcing NGINX Plus R29](https://www.nginx.com/blog/nginx-plus-r29-released/) + + + +### NGINX Plus R29 Update + +This is an improvement release for NGINX Plus R29. + +NGINX Plus R29 P1
      +_11 October 2023_ + +- Additional protection against HTTP/2 Rapid Reset Attack vulnerability ([CVE-2023-44487](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-44487)) that may affect NGINX only when it is configured with the [keepalive requests](https://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_requests) value substantially higher than the default value. Limitations in HTTP/2 protocol allow clients to produce a higher RPS rate than expected from a configured HTTP/2 [max concurrent streams](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_concurrent_streams) setting which can be exploited to trigger a Denial-of-Service attack. + +More information: [HTTP/2 Rapid Reset Attack Impacting NGINX Products](https://www.nginx.com/blog/http-2-rapid-reset-attack-impacting-f5-nginx-products/) + + + +## NGINX Plus Release 28 (R28) +_29 November 2022_
      
+_Based on NGINX Open Source 1.23.2_
+
+NGINX Plus R28 is a feature release:
+
+- API version 8 update:
+
+ - SSL extended statistics for each HTTP [upstream](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_upstream) and stream [upstream](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_stream_upstream)
+
+ - SSL extended statistics for each HTTP [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_server_zone) and stream [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_stream_server_zone)
+
+ - Extended statistics for [SSL](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_ssl_object) endpoint
+
+- PROXY protocol v2 TLV variables for Amazon Web Services, Google Cloud Platform, and Microsoft Azure in [HTTP](https://nginx.org/en/docs/http/ngx_http_proxy_protocol_vendor_module.html) and [stream](https://nginx.org/en/docs/stream/ngx_stream_proxy_protocol_vendor_module.html)
+
+- The `proxy_protocol_tlv_` variable for [HTTP](http://nginx.org/en/docs/http/ngx_http_core_module.html#var_proxy_protocol_tlv_) and [stream](http://nginx.org/en/docs/stream/ngx_stream_core_module.html#var_proxy_protocol_tlv_) that can keep different TLV types from the PROXY protocol header including SSL TLV types
+
+- [Sticky cookie](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky) load-balancing method now can accept variables in the [SameSite](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky_samesite) attribute in addition to `Strict`, `Lax`, or `None` values
+
+- NGINX Plus live activity monitoring dashboard now supports HTTP status code statistics and extended SSL statistics for upstreams and server zones
+
+- TLS session tickets encryption keys are now automatically rotated when using shared memory in the [`ssl_session_cache`](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache) directive
+
+- Looking up of 
IPv4 addresses while resolving now can be disabled with the `ipv4=off` parameter of the [`resolver`](http://nginx.org/en/docs/http/ngx_http_core_module.html#resolver) directive. + +- Changes in handling multiple headers with identical names. + + - Most of the known duplicate upstream response headers are now ignored with a warning. + + - Duplicate `Content-Length` and `Transfer-Encoding` headers are now rejected as well as the responses with invalid `Content-Length` or `Transfer-Encoding` headers, or if both `Content-Length` and `Transfer-Encoding` are present in the response. + +NGINX Plus R28 is supported on: + +- AlmaLinux 8, 9 +- Alpine Linux 3.13, 3.14, 3.15, 3.16, 3.17 +- Amazon Linux 2 LTS +- CentOS 7.4+ +- Debian 11 +- FreeBSD 12.1+, 13 +- Oracle Linux 7.4+, 8.1+, 9 +- RHEL 7.4+, 8.1+, 9.0+ +- Rocky Linux 8, 9 +- SUSE Linux Enterprise Server 12 SP5, 15 SP2 +- Ubuntu 18.04 LTS, 20.04 LTS, 22.04 LTS + +**Notes:** + +- AlmaLinux 8 and 9 are new in this release +- Alpine Linux 3.17 is new in this release +- Oracle Linux 9 is new in this release +- Rocky Linux 8 and 9 are new in this release +- Debian 10 is removed +- Alpine Linux 3.13 is deprecated + +More information: [Announcing NGINX Plus R28](https://www.nginx.com/blog/nginx-plus-r28-released/) + + + +## NGINX Plus Release 27 (R27) +_28 June 2022_
      +_Based on NGINX Open Source 1.21.6_ + +NGINX Plus R27 is a feature release: + +- API version 8: + + - SSL statistics for each HTTP [upstream](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_upstream) and stream [upstream](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_stream_upstream) + + - SSL statistics for each HTTP [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_server_zone) and stream [server zone](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_stream_server_zone) + +- JWT Authentication: error code can be customized with the `error` parameter of the [`auth_jwt_require`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_require) directive if any additional condition of JWT validation fails + +- HTTP health checks: the [`keepalive_time`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check_keepalive_time) parameter of the [`health_check`](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check) directive +that enables keepalive connections for health checks and specifies the time +during which requests can be processed through one keepalive connection + +- The [Prometheus-njs module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/prometheus-njs/) now supports version [`7`](https://nginx.org/en/docs/http/ngx_http_api_module.html#compatibility) of the [API](https://nginx.org/en/docs/http/ngx_http_api_module.html), including `/stream/limit_conns/`, `/http/limit_conns/`, `/http/limit_req/` data, and HTTP status code statistics for [upstreams](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_upstream), [server zones](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_server_zone) and [location zones](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_location_zone) + +- 
[kTLS](https://www.nginx.com/blog/improving-nginx-performance-with-kernel-tls/) is now also available on RHEL 9.0 and Ubuntu 22.04 + + +NGINX Plus R27 is supported on: + +- Alpine Linux 3.13, 3.14, 3.15, 3.16 +- Amazon Linux 2 LTS +- CentOS 7.4+ +- Debian 10, 11 +- FreeBSD 12.1+, 13 +- Oracle Linux 7.4+, 8.1+ +- RHEL 7.4+, 8.1+, 9.0+ +- SUSE Linux Enterprise Server 12 SP5, 15 SP2 +- Ubuntu 18.04 LTS, 20.04 LTS, 22.04 LTS + +**Notes:** + +- Alpine Linux 3.16 is new in this release +- RHEL 9.0+ is new in this release +- Ubuntu 22.04 LTS is new in this release +- Debian 10 is deprecated +- Alpine 3.12 is no longer supported +- CentOS 8 is no longer supported +- Power 8 architecture is no longer supported + +More information: [Announcing NGINX Plus R27](https://www.nginx.com/blog/nginx-plus-r27-released/) + + +### NGINX Plus R27 Update + +This is a bug‑fix release for NGINX Plus R27. + +NGINX Plus R27 P1
      +_19 October 2022_ + +- In HLS ([CVE-2022-41743](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-41743)) and MP4 ([CVE-2022-41741](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-41741)) modules when processing specially crafted video files a memory corruption, or a memory disclosure in MP4 module ([CVE-2022-41742](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-41742)) could happen. + + +## NGINX Plus Release 26 (R26) +_15 February 2022_
      +_Based on NGINX Open Source 1.21.5_ + +NGINX Plus R26 is a feature release: + +- JWT key caching with the +[`auth_jwt_key_cache`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_cache) directive + +- Enhanced ALPN support with the [`ssl_alpn`](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_alpn) directive for stream, and the `$ssl_alpn_protocol` variable for [HTTP](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#var_ssl_alpn_protocol) and [stream](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#var_ssl_alpn_protocol) + +- The [`$ssl_curve`](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#var_ssl_curve) variable that returns the negotiated curve used for SSL handshake key exchange process + +- The [`proxy_half_close`](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_half_close) directive for stream that allows closing one side of a connection while the data is still transmitted + +- The [`mp4_start_key_frame`](https://nginx.org/en/docs/http/ngx_http_mp4_module.html#mp4_start_key_frame) directive in the MP4 module that forces a video to always start with a key frame + + +NGINX Plus R26 is supported on: + +- Alpine Linux 3.12, 3.13, 3.14, 3.15 +- Amazon Linux 2 LTS +- CentOS 7.4+, 8.1+ +- Debian 10, 11 +- FreeBSD 12.1+, 13 +- Oracle Linux 7.4+, 8.1+ +- RHEL 7.4+, 8.1+, 9.0+ +- SUSE Linux Enterprise Server 12 SP5, 15 SP2 +- Ubuntu 18.04 LTS, 20.04 LTS, 22.04 LTS + +**Notes:** + +- Alpine Linux 3.15 is new in this release +- Added support for IBM Z (s390x) for CentOS 8+, RHEL 8+, and Ubuntu 20.04 LTS +- RHEL 8.0+ was updated to RHEL 8.1+ +- CentOS 8.0+ was updated to CentOS 8.1+ +- CentOS 8 is deprecated +- Power 8 is deprecated +- Alpine 3.12 is deprecated +- Alpine 3.11 is no longer supported +- The [`js_include`](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_include) directive was removed, the 
[`js_import`](http://nginx.org/en/docs/http/ngx_http_js_module.html#js_import) directive should be used instead +- The [`aio sendfile`](https://nginx.org/en/docs/http/ngx_http_core_module.html#aio) directive was removed, the [`sendfile`](https://nginx.org/en/docs/http/ngx_http_core_module.html#sendfile) directive should be used instead +- The third-party `Cookie‑Flag` was removed from the dynamic modules repository, the [`proxy_cookie_flags`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_flags) directive should be used instead +- Swagger UI with REST API YAML specification is not included into NGINX Plus packages by default any more and now is a part of [docs.nginx.com](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#the-swagger-ui) + +More information: [Announcing NGINX Plus R26](https://www.nginx.com/blog/nginx-plus-r26-released/) + + +### NGINX Plus R26 Update + +This is a bug‑fix release for NGINX Plus R26. + +NGINX Plus R26 P1
      +_19 October 2022_ + +- In HLS ([CVE-2022-41743](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-41743)) and MP4 ([CVE-2022-41741](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-41741)) modules when processing specially crafted video files a memory corruption, or a memory disclosure in MP4 module ([CVE-2022-41742](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-41742)) could happen. + + +## NGINX Plus Release 25 (R25) +_28 September 2021_
      +_Based on NGINX Open Source 1.21.3_ + +NGINX Plus R25 is a feature release: + +- JWT authentication: + + - support for signed and then encrypted Nested JWT with the `nested` parameter of the [auth_jwt_type](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_type) directive + + - additional conditions for JWT validation can be specified with the [auth_jwt_require](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_require) directive + + - the [$jwt_payload](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#var_jwt_payload) variable that returns either enclosed JWS token for Nested JWT, or JSON with claims for JWE + + - now it is possible to have multiple [auth_jwt_key_file](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_file) and [auth_jwt_key_request](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_request) directives within the same context + + - asymmetric RSA-OAEP cryptographic algorithms for JWE + +- API version 7: HTTP status code statistics are now collected per-code, in addition to aggregation per-class, for [upstreams](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_upstream), [server zones](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_server_zone), and [location zones](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_location_zone) + +- Stream health checks: introduced the [persistent](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check_persistent) parameter in the [health_check](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check) directive that enables persistence of mandatory health check status during configuration reload + +- TCP Fast Open support with the `fastopen` parameter of the [listen](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#listen) directive in the stream module + +- Mail proxy: + + - the 
number of errors before closing the connection can be specified with the [max_errors](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#max_errors) directive to mitigate the ALPACA attack
      +_14 December 2021_ + +- Swagger UI updated to version 4.1.2 +- Fixed a crash that might happen when an upstream server was updated via the API + + +## NGINX Plus Release 24 (R24) +_27 April 2021_
      +_Based on NGINX Open Source 1.19.10_ + +NGINX Plus R24 is a feature release: + +- Support for JSON Web Encryption added to the [JSON Web Token (JWT) module](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_type) + +- HTTP health checks: introduced the [persistent](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check_persistent) parameter in the [health_check](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check) directive that enables persistence to mandatory health checks after reload + +- Flags in the [proxy_cookie_flags](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_flags) directive can now contain variables + +- Support for [PROXY Protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) in mail (the `proxy_protocol` parameter of the [listen](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#listen) directive, [proxy_protocol](https://nginx.org/en/docs/mail/ngx_mail_proxy_module.html#proxy_protocol) and [set_real_ip_from](https://nginx.org/en/docs/mail/ngx_mail_realip_module.html#set_real_ip_from) directives) + +- If free worker connections are exhausted, NGINX Plus starts closing not only keepalive connections, but also connections in [lingering_close](https://nginx.org/en/docs/http/ngx_http_core_module.html#lingering_close) + +- The maximum duration of a persistent connection can be limited with the +`keepalive_time` directive for [http](https://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_time) and [upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive_time) servers + +- New variable, [$connection_time](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_connection_time), that keeps connection time + +NGINX Plus R24 is supported on: + +- Alpine Linux 3.10, 3.11, 3.12, 3.13 +- Amazon Linux (2018.03+), Amazon Linux 2 LTS +- CentOS 7.4+, 8.0+ +- Debian 10 +- FreeBSD 11.4+, 12.1+, 13 +- Oracle 
Linux 7.4+ +- RHEL 7.4+, 8.0+ +- SUSE Linux Enterprise Server 12 SP5, 15 SP2 +- Ubuntu 16.04 LTS, 18.04 LTS, 20.04 LTS + +**Notes:** + +- FreeBSD 13 is new in this release +- Alpine 3.13 is new in this release +- SUSE Linux Enterprise Server 15 SP2 is new in this release +- CentOS 7 (aarch64) is new in this release +- Amazon Linux 1 (2018) is deprecated +- Ubuntu 16.04 is deprecated +- Alpine Linux 3.10 is deprecated +- Debian 9 is no longer supported +- Amazon Linux 2 now depends on OpenSSL 1.1 package. + +**Upgrade Note:** + +NGINX Plus repositories have been separated into individual repositories based on operating system distribution and license subscription. Before upgrading from previous NGINX Plus versions, you must first reconfigure your repositories to point to the correct location. To reconfigure your repository, follow the [installation instructions](http://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-plus/) for your operating system. + +More information: [Announcing NGINX Plus R24](https://www.nginx.com/blog/nginx-plus-r24-released/) + + +### NGINX Plus R24 Updates +These are bug‑fix releases for NGINX Plus R24. + +NGINX Plus R24 P1
      +_18 May 2021_ + +- Resolver: an [issue](https://support.f5.com/csp/article/K12331123) in NGINX resolver may allow an attacker who is able to forge UDP packets from the specified DNS server to cause a 1-byte memory overwrite, resulting in a worker process interruption or other unspecified impact ([CVE-2021-23017](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-23017)) + +NGINX Plus R24 P2
      +_14 December 2021_ + +- Swagger UI updated to version 4.1.2 + + +## NGINX Plus Release 23 (R23) +_8 December 2020_
      +_Based on NGINX Open Source 1.19.5_ + +NGINX Plus R23 is a feature release: + +- gRPC health checks: introduced the [type=grpc](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check_grpc) parameter in the [health_check](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check) directive that enables active health checks of gRPC upstream servers + +- [Sticky cookie](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky) load-balancing method now can accept the [SameSite](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky_samesite) attribute with `Strict`, `Lax`,or `None` values + +- Support for cookie flags with the [proxy_cookie_flags](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_flags) and [userid_flags](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid_flags) directives + +- Introduced script that performs [unprivileged installation](https://docs.nginx.com/nginx/admin-guide/installing-nginx#unpriv_install) of NGINX Plus + +- New command-line switch to redefine an error log file: [-e](https://nginx.org/en/docs/switches.html) + +- New [set](https://nginx.org/en/docs/stream/ngx_stream_set_module.html#set) directive for stream that allows setting a value for a variable + +- Added support for arbitrary [OpenSSL configuration commands](https://www.openssl.org/docs/man1.1.1/man3/SSL_CONF_cmd.html) with the [ssl_conf_command](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_conf_command) directive + +- The [ssl_reject_handshake](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_reject_handshake) directive that allows rejecting the SSL handshake in the `server` block + +- Support for [proxy_smtp_auth](http://nginx.org/en/docs/mail/ngx_mail_proxy_module.html#proxy_smtp_auth) user authentication on the SMTP backend in mail proxy + +- Cache manager improved to monitor the minimum amount of free space (the `min_free` parameter of the 
[proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) directive) + +NGINX Plus R23 is supported on: + +- Alpine Linux 3.10, 3.11, 3.12 +- Amazon Linux (2018.03+), Amazon Linux 2 LTS +- CentOS 7.4+, 8.0+ +- Debian 9, 10 +- FreeBSD 11.4+, 12.1+ +- Oracle Linux 7.4+ +- RHEL 7.4+, 8.0+ +- SUSE Linux Enterprise Server 12, 15 +- Ubuntu 16.04 LTS, 18.04 LTS, 20.04 LTS + +**Notes:** + +- Alpine 3.12 is new in this release +- Alpine 3.9 is no longer supported +- CentOS/RHEL 6.x is no longer supported +- Debian 10 (aarch64) is new in this release +- Ubuntu 19.10 is no longer supported + +More information: [Announcing NGINX Plus R23](https://www.nginx.com/blog/nginx-plus-r23-released/) + + +### NGINX Plus R23 Update + +This is a bug‑fix release for NGINX Plus R23. + +NGINX Plus R23 P1
      +_18 May 2021_ + +- Resolver: an [issue](https://support.f5.com/csp/article/K12331123) in NGINX resolver may allow an attacker who is able to forge UDP packets from the specified DNS server to cause a 1-byte memory overwrite, resulting in a worker process interruption or other unspecified impact ([CVE-2021-23017](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-23017)) + + +## NGINX Plus Release 22 (R22) +_9 June 2020_
      +_Based on NGINX Open Source 1.19.0_ + +NGINX Plus R22 is a feature release: + +- Client certificate OCSP validation +- Realtime [limit_conn](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html) and [limit_req](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html) dashboard charts +- [Delay](https://nginx.org/en/docs/http/ngx_http_core_module.html#auth_delay) on authentication failure + +NGINX Plus R22 is supported on: + +- Alpine Linux 3.9, 3.10, 3.11 +- Amazon Linux (2018.03+), Amazon Linux 2 LTS +- CentOS 6.5+, 7.4+, 8.0+ +- Debian 9, 10 +- FreeBSD 11.3+, 12.1+ +- Oracle Linux 6.5+, 7.4+ +- RHEL 6.5+, 7.4+, 8.0+ +- SUSE Linux Enterprise Server 12, 15 +- Ubuntu 16.04 LTS, 18.04 LTS, 19.10, 20.04 LTS + +**Notes:** + +- Alpine 3.8 is no longer supported + +More information: [Announcing NGINX Plus R22](https://www.nginx.com/blog/nginx-plus-r22-released/) + + +## NGINX Plus Release 21 (R21) +_7 April 2020_
      +_Based on NGINX Open Source 1.17.9_ + +NGINX Plus R21 is a feature release: + +- Support for a variable parameter to the [grpc_pass](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_pass) directive enables dynamic gRPC routing + +NGINX Plus R21 is supported on: + +- Alpine Linux 3.8, 3.9, 3.10, 3.11 +- Amazon Linux (2018.03+), Amazon Linux 2 LTS +- CentOS 6.5+, 7.4+, 8.0+ +- Debian 9, 10 +- FreeBSD 11.2+, 12.0+ +- Oracle Linux 6.5+, 7.4+ +- RHEL 6.5+, 7.4+, 8.0+ +- SUSE Linux Enterprise Server 12, 15 +- Ubuntu 16.04 LTS, 18.04 LTS, 19.10, 20.04 LTS + +**Notes:** + +- Alpine 3.11 is new in this release +- Ubuntu 20.04 is new in this release +- Ubuntu 19.04 is no longer supported +- NGINX Plus is no longer available for 32‑bit (i386) platforms. Applies to: + - CentOS/Oracle Linux/RHEL 6.5+ (x86_64 still supported) + - Debian 9, 10 (x86_64 still supported) + - Ubuntu 16.04 LTS (x86_64, aarch64, ppc64le still supported) + +More information: [Announcing NGINX Plus R21](https://www.nginx.com/blog/nginx-plus-r21-released/) + + +## NGINX Plus Release 20 (R20) +_3 December 2019_
      +_Based on NGINX Open Source 1.17.6_ + +NGINX Plus R20 is a feature release: + +- Enhancements to rate limiting: endpoint in NGINX Plus API for real‑time metrics, [$limit_req_status](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_status) variable captures request's rate‑limiting status in access log +- Enhancements to connection limiting: endpoint in NGINX Plus API for real‑time metrics, [$limit_conn_status](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_status) variable captures request's connection‑limiting status in access log, dry‑run mode with [limit_conn_dry_run](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_dry_run) directive +- Support in key‑value store for matching on start of character strings (new `type=prefix` parameter to [keyval_zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone) directive) +- Separate DNS resolution in each upstream group ([resolver](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver) directive) +- PROXY Protocol variables capture IP address and port of original proxy server ([$proxy_protocol_server_{addr,port}](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_proxy_protocol_server_addr)) +- Security improvements for HTTP/2: better detection of invalid client behavior, improved error responses, improved functioning of [proxy_request_buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering) and [worker_shutdown_timeout](https://nginx.org/en/docs/ngx_core_module.html#worker_shutdown_timeout) directives + +NGINX Plus R20 R20 is supported on: + +- Alpine Linux 3.8, 3.9, 3.10 +- Amazon Linux (2018.03+), Amazon Linux 2 LTS +- CentOS 6.5+, 7.4+, 8.0+ +- Debian 9, 10 +- FreeBSD 11.2+, 12.0+ +- Oracle Linux 6.5+, 7.4+ +- RHEL 6.5+, 7.4+, 8.0+ +- SUSE Linux Enterprise Server 12, 15 +- Ubuntu 16.04 LTS, 18.04 LTS, 19.04, 19.10 + +**Notes:** + +- CentOS 8.0+ is new in this 
release +- FreeBSD 12.1 is new in this release +- RHEL 8.1 is new in this release +- Ubuntu 19.10 is new in this release + +More information: [Announcing NGINX Plus R20](https://www.nginx.com/blog/nginx-plus-r20-released/) + + +## NGINX Plus Release 19 (R19) +_13 August 2019_
      +_Based on NGINX Open Source 1.17.3_ + +NGINX Plus R19 is a feature release: + +- Metrics for individual [location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) blocks (enabled by [status_zone](https://nginx.org/en/docs/http/ngx_http_status_module.html#status_zone) directive) +- Metrics about DNS resolver functionality (new `status_zone` parameter to [resolver](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver) directive) +- Two new tabs on NGINX Plus live activity monitoring dashboard for metrics about DNS and clustering; per‑location metrics are also reported +- Dry‑run mode for testing effects of [request‑rate limits](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req) on production traffic without actually enforcing them (new [limit_req_dry_run](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_dry_run) directive) +- Support in key‑value store for IP address ranges in CIDR notation as well as individual addresses (new `type=ip` parameter to [keyval_zone](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone) directive) +- Expiration time can be set for each key‑value entry to override default expiration time, either at creation time for new entry or as a modification to existing entry +- The parameter to the [limit_rate](https://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate), [limit_rate_after](https://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate_after), [proxy_download_rate](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_download_rate), and [proxy_upload_rate](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_upload_rate) directives can be a variable + +NGINX Plus R19 is supported on: + +- Alpine Linux 3.8, 3.9, 3.10 +- Amazon Linux (2018.03+), Amazon Linux 2 LTS +- CentOS 6.5+, 7.4+ +- Debian 9, 10 +- FreeBSD 11.2+, 12.0 +- Oracle Linux 6.5+, 7.4+ +- RHEL 6.5+, 7.4+, 8 +- SUSE Linux 
Enterprise Server 12, 15 +- Ubuntu 16.04 LTS, 18.04 LTS, 19.04 + +**Notes:** + +- Alpine Linux 3.10 is new in this release +- Debian 8 is no longer supported +- Debian 10 is new in this release +- Ubuntu 14.04 LTS and 18.10 are no longer supported +- Ubuntu 19.04 is new in this release + +More information: [Announcing NGINX Plus R19](https://www.nginx.com/blog/nginx-plus-r19-released/) + + +## NGINX Plus Release 18 (R18) +_9 April 2019_
      +_Based on NGINX Open Source 1.15.10_ + +NGINX Plus R18 is a feature release: + +- Dynamic SSL certificate loading, either from [file](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate) or from [key-value](https://nginx.org/en/docs/http/ngx_http_keyval_module.html) storage (for the latter case, prefix the variable with `data:`) +- New features in [OpenID Connect reference implementation](https://github.com/nginxinc/nginx-openid-connect): opaque session tokens as a browser cookie, refresh tokens to refresh expired ID tokens without user interaction, and a logout URL +- Additional logic for verifying arbitrary variables in active health checks (new `require` parameter to [match](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#match) directive) +- Wildcard support for [listen](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#listen) directive means same [zone_sync](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync) configuration can now be used for all instances in a cluster +- Port ranges supported for [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen) directive +- For TCP/UDP, existing connections to proxied upstream server can be explicitly closed after server is removed from upstream group due to health check failure, API call, or re-resolve action (new [proxy_session_drop](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_session_drop) directive) +- New variable, [$upstream_bytes_sent](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#var_upstream_bytes_sent), contains number of bytes sent to an upstream server +- New or updated dynamic modules: + - [Brotli](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/brotli/) (New): General‑purpose, lossless data compression algorithm + - [OpenTracing](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/opentracing/) (New): Ability to instrument NGINX Plus with 
OpenTracing‑compliant requests for a range of distributed tracing services, such as Datadog, Jaeger, and Zipkin + - [Lua](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/lua//) (Updated): Scripting language for NGINX Plus, updated to use LuaJIT 2.1 + - [NGINX JavaScript](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/nginscript/) (Updated): JavaScript module for NGINX Plus, updated to version [0.3.0](https://nginx.org/en/docs/njs/changes.html#njs0.3.0) + +NGINX Plus R18 is supported on: + +- Alpine Linux 3.8, 3.9 +- Amazon Linux (2018.03+), Amazon Linux 2 LTS +- CentOS 6.5+, 7.4+ +- Debian 8.0, 9.0 +- FreeBSD 11.2+, 12.0 +- Oracle Linux 6.5+, 7.4+ +- RHEL 6.5+, 7.4+, 8 +- SUSE Linux Enterprise Server 12, 15 +- Ubuntu 14.04 LTS, 16.04 LTS, 18.04, 18.10 + +**Notes:** + +- Amazon Linux 2017.09 is no longer supported; minimum supported version is now 2018.03 +- CentOS/Oracle/Red Hat Enterprise Linux 7.3 is no longer supported; minimum supported version is now 7.4 +- Debian 8.0 will be removed at NGINX Plus R19 +- Ubuntu 14.04 will be removed at NGINX Plus R19 + +More information: [Announcing NGINX Plus R18](https://www.nginx.com/blog/nginx-plus-r18-released/) + +### NGINX Plus R18 Update + +This is a bug‑fix release for NGINX Plus R18. + +NGINX Plus R18 P1
      +_6 August 2019_ + +- Security patch: When using HTTP/2 a client might cause excessive memory consumption and CPU usage ([CVE-2019-9511](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-9511), [CVE-2019-9513](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-9513), [CVE-2019-9516](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-9516)) + + +## NGINX Plus Release 17 (R17) +_11 December 2018_
      +_Based on NGINX Open Source 1.15.7_ + +NGINX Plus R17 is a feature release: + +- Support for TLS 1.3 using `TLSv1.3` parameter to [ssl_protocols](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols) directive +- Two‑stage [rate limiting](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html) with the new `delay=` parameter; excessive requests are initially delayed and then ultimately rejected +- Support for the Ed25519 and Ed448 cryptographic algorithms added to the [JSON Web Token (JWT) module](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html) +- Ability to fetch JSON Web Keys (JWK) directly from identity provider (IdP) when using OpenID Connect (new [auth_jwt_key_request](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_request) directive) +- TCP keepalives between NGINX Plus and the proxied server (new [proxy_socket_keepalive](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_socket_keepalive) directive) +- Control over how long HTTP keepalive connection between NGINX Plus and proxied server can be idle before being closed (new [keepalive_timeout](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive_timeout) directive) +- For UDP, number of packets sent from NGINX Plus to proxied server before new UDP "session" to that server is started can be set explicitly (new [proxy_requests](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_requests) directive) +- [Zone Synchronization](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html) module can now pass server name using SNI when connecting to cluster nodes for server name verification (new [zone_sync_ssl_server_name](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_server_name) directive) +- The NGINX JavaScript module has been updated: + - Support for arguments objects + - Support for non‑integer fractions + - Support for additional time methods: 
`console.time()` and `console.timeEnd()` + - Variables and functions can be redeclared + - Integration with the NGINX Stream module for TCP/UDP applications has been refactored to use various return functions, including a `send()` method for modifying ingress trafficl egress traffic is now available through a callback + +NGINX Plus R17 is supported on: + +- Alpine Linux 3.8, 3.9 +- Amazon Linux (2017.09), Amazon Linux 2 LTS +- CentOS 6.5+, 7.0+ +- Debian 8.0, 9.0 +- FreeBSD 11.2+, 12.0 +- Oracle Linux 6.5+, 7.0+ +- RHEL 6.5+, 7.0+ +- SUSE Linux Enterprise Server 12, 15 +- Ubuntu 14.04 LTS, 16.04 LTS, 18.04, 18.10 + +**Notes:** + +- Alpine Linux 3.8 and 3.9 are new in this release +- CentOS/Oracle Linux/RHEL 7.3 will be removed at NGINX Plus R18 +- FreeBSD 11.2 and 12.0 are new in this release; versions 10.4 and 11.1 are no longer supported +- Ubuntu 14.04 will be removed at NGINX Plus R19 +- Ubuntu 18.10 is new in this release + +More information: [Announcing NGINX Plus R17](https://www.nginx.com/blog/nginx-plus-r17-released/) + + +## NGINX Plus Release 16 (R16) +_5 September 2018_
      +_Based on NGINX Open Source 1.15.2_ + +NGINX Plus R16 is a feature release: + +- [Rate limiting](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html) in a cluster using [Zone Synchronization](https://www.nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html) module +- [Key-value store](https://nginx.org/en/docs/http/ngx_http_keyval_module.html) in a cluster using [Zone Synchronization](https://www.nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html) module +- Timeouts in [Key-Value Store](https://nginx.org/en/docs/http/ngx_http_keyval_module.html) module +- New [random](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#random) load‑balancing algorithm with Random with Two Choices variant, for which [least_time](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#least_time) or [least_conn](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#least_conn) can be used to decide between the two choices +- UDP load balancing ([stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html) module) enhanced with support for multiple UDP packets from the client, enabling use of more complex UDP protocols such as OpenVPN, VoIP, and VDI +- Support for [PROXY Protocol v2 (PPv2) header](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt), and ability to inspect custom TLV values in header +- Support for [AWS PrivateLink](https://aws.amazon.com/privatelink/), Amazon's technology for creating secure tunnels into a VPC +- opaque session token support in the [OpenID Connect reference implementation](https://github.com/nginxinc/nginx-openid-connect) +- New [$ssl_preread_protocol](https://nginx.org/en/docs/stream/ngx_stream_ssl_preread_module.html#var_ssl_preread_protocol) variable to distinguish between SSL/TLS and other protocols when forwarding traffic using a TCP ([stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html)) proxy +- New [Encrypted 
Session](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/encrypted-session/) dynamic module +- The NGINX JavaScript module has been updated: + - Single object (`r`) is used to access both request and response attributes associated with each HTTP request + - New language support: `bytesFrom()`, `padStart()`, `padEnd()`, `getrandom()`, `getentropy()`, and binary literals + +NGINX Plus R16 is supported on: + +- Amazon Linux (2017.09), Amazon Linux 2 LTS +- CentOS 6.5+, 7.0+ +- Debian 8.0, 9.0 +- FreeBSD 10.4+, 11.1+ +- Oracle Linux 6.5+, 7.0+ +- RHEL 6.5+, 7.0+ +- SUSE Linux Enterprise Server 12 +- Ubuntu 14.04 LTS, 16.04 LTS, 18.04 + +**Notes:** + +- FreeBSD 10.4+ and 11.1+ are new in this release; versions 10.3 and 11.0 are no longer supported +- Amazon Linux 2 (LTS) is updated to the GA version. +- Ubuntu 17.10 is no longer supported +- The Upstream Conf and Extended Status modules are superseded by the [NGINX Plus API](https://nginx.org/en/docs/http/ngx_http_api_module.html) module and are no longer distributed in NGINX Plus (see our [transition guide](https://www.nginx.com/blog/transitioning-to-nginx-plus-api-configuration-monitoring/) for details) +- The [New Relic plug‑in](https://newrelic.com/integrations/nginx) for NGINX has been updated to use the new NGINX Plus API, but is no longer supported by NGINX, Inc. + +More information: [Announcing NGINX Plus R16](https://www.nginx.com/blog/nginx-plus-r16-released/) + +### NGINX Plus R16 Update + +This is a bug‑fix release for NGINX Plus R16. + +NGINX Plus R16 P1
      +_30 October 2018_ + +- Security patch: When using HTTP/2 a client might cause excessive memory consumption ([CVE-2018-16843](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-16843)) and CPU usage ([CVE-2018-16844](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-16844)) +- Security patch: Processing of a specially crafted MP4 file with the ngx\_http\_mp4\_module might result in worker process memory disclosure ([CVE-2018-16845](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-16845)) + + +## NGINX Plus Release 15 (R15) +_10 April 2018_
      +_Based on NGINX Open Source 1.13.10_ + +NGINX Plus R15 is a feature release: + +- Proxying, load balancing, and SSL-termination of gRPC traffic +- HTTP/2 server push +- Sticky learn session persistence in a cluster using new [Zone Synchronization](https://www.nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html) module, which synchronizes [shared memory zones](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) across a cluster of NGINX Plus instances +- [OpenID Connect (OIDC) authorization code flow](https://github.com/nginxinc/nginx-openid-connect), enabling integration with CA Single Sign-On (formerly SiteMinder), ForgeRock OpenAM, Keycloak, Okta, and other identity providers +- Subrequests from the [NGINX JavaScript](https://www.nginx.com/blog/introduction-nginscript/) module +- Crypto libraries in NGINX JavaScript module with support for common hash functions MD5, SHA-1, and SHA-256 +- Inheritance of the `CAP_NET_RAW` Linux capability so that transparent proxying does not require worker processes to have root privileges +- New [auth_jwt_leeway](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_leeway) directive to compensate for clock skew between NGINX Plus and identity provider +- Performance enhancements and bug fixes to [NGINX WAF](https://www.nginx.com/products/nginx-waf/) module +- Updates to [LDAP authentication reference implementation](https://github.com/nginxinc/nginx-ldap-auth) +- New [$upstream_queue_time](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#var_upstream_queue_time) variable to hold the amount of time a request spends in the [upstream queue](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#queue) +- New [$ssl_preread_alpn_protocols](https://nginx.org/en/docs/stream/ngx_stream_ssl_preread_module.html#var_ssl_preread_alpn_protocols) variable to hold the Application Layer Protocol Negotiation (ALPN) protocols presented by client +- New 
[Cookie-Flag](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/cookie-flag/) dynamic module + +NGINX Plus R15 is supported on: + +- Amazon Linux (2017.09), Amazon Linux 2 LTS +- CentOS 6.5+, 7.0+ +- Debian 8.0, 9.0 +- FreeBSD 10.3, 11.0 +- Oracle Linux 6.5+, 7.0+ +- RHEL 6.5+, 7.0+ +- SUSE Linux Enterprise Server 12 +- Ubuntu 14.04 LTS, 16.04 LTS, 17.10, 18.04 + +**Notes:** + +- Ubuntu 17.04 is no longer supported +- nginScript is now known as the NGINX JavaScript module +- The NGINX Plus API version has been incremented to 3; all previous versions of the NGINX Plus API are still supported +- This is the last release to support the deprecated dynamic (on-the-fly) reconfiguration and extended status APIs (see our [transition guide](https://www.nginx.com/blog/transitioning-to-nginx-plus-api-configuration-monitoring/) for details) + +More information: [Announcing NGINX Plus R15](https://www.nginx.com/blog/nginx-plus-r15-released/) + +### NGINX Plus R15 Updates + +These are bug‑fix releases for NGINX Plus R15. + +NGINX Plus R15 P2
      +_30 October 2018_ + +- Security patch: When using HTTP/2 a client might cause excessive memory consumption ([CVE-2018-16843](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-16843)) and CPU usage ([CVE-2018-16844](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-16844)) +- Security patch: Processing of a specially crafted mp4 file with the ngx_http_mp4_module might result in worker process memory disclosure ([CVE-2018-16845](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-16845)) + +NGINX Plus R15 P1
      +_12 April 2018_ + +- Third‑party modules might not be loaded due to signature incompatibility + + +## NGINX Plus Release 14 (R14) +_12 December 2017_ +_NGINX Open Source build 1.13.7_ + +NGINX Plus R14 is a feature release: + +- Nested JSON Web Token (JWT) claims, array data, and longer key sizes (256‑, 384‑, and 512‑bit) for JWT signing algorithms, providing more flexibility and security when validating JWTs +- Clustering support for the [sticky_learn](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky_learn) method of session persistence, as a technology preview of distribution of session state data in a cluster +- [Key‑value store](https://nginx.org/en/docs//http/ngx_http_keyval_module.html) and NGINX Plus API in the `stream` context, making the same key‑value store features are available for TCP/UDP applications as for HTTP applications +- New NGINX Plus [dashboard](https://demo.nginx.com/) utilizing the NGINX Plus API which was introduced in [NGINX Plus R13](#r13) +- Improvements to [NGINX JavaScript](https://www.nginx.com/blog/introduction-nginscript/) module, including the ability to manage JSON objects, read content from filesystems, and backtrace to errors and exceptions to further improve troubleshooting +- Ability to encode client certificates in a HTTP header and send them to backend applications with the [$ssl_client_escaped_cert](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#variables) variable +- Enhanced DNS resolver that preserves the list of upstream IP addresses across a reload of the NGINX Plus configuration +- Ability to drain upstream servers extended to file‑based configurations with the [drain](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server) parameter to the upstream `server` directive + +
    + +NGINX Plus R14 is supported on: + +- Amazon Linux (2016.09), Amazon Linux 2 (2017.12) +- CentOS 6.5+, 7.0+ +- Debian 8.0, 9.0 +- FreeBSD 10.3, 11.0 +- Oracle Linux 6.5+, 7.0+ +- RHEL 6.5+, 7.0+ +- SUSE Linux Enterprise Server 12 +- Ubuntu 14.04 LTS, 16.04 LTS, 17.04, 17.10 + +**Notes:** + +- Debian 7.0 is no longer supported +- Ubuntu 17.10 is new in this release +- The Upstream Conf and Extended Status APIs were deprecated in [NGINX Plus R13](#r13); support will continue only through NGINX Plus R15 (see our [transition guide](https://www.nginx.com/blog/transitioning-to-nginx-plus-api-configuration-monitoring/) for details) + +More information: [Announcing NGINX Plus R14](https://www.nginx.com/blog/nginx-plus-r14-released/) + +### NGINX Plus R14 Updates + +This is a bug‑fix release for NGINX Plus R14. + +NGINX Plus R14 P1
    +_25 January 2018_ + +- Live activity monitoring: Reinstated some missing tooltips for the dashboard +- NGINX Plus API: HTTP Basic Authentication support for read‑write mode + + +## NGINX Plus Release 13 (R13) +_29 August 2017_
    +_Based on NGINX Open Source 1.13.4_ + +NGINX Plus R13 is a feature release: + +- Ability to duplicate all incoming traffic to a dedicated server (the [mirror](https://nginx.org/en/docs/http/ngx_http_mirror_module.html#mirror) directive) +- Improvements to [NGINX JavaScript](https://www.nginx.com/blog/introduction-nginscript/) module, including the new interactive shell to facilitate development of NGINX JavaScript code +- New [NGINX Plus API](https://nginx.org/en/docs/http/ngx_http_api_module.html) that incorporates the functionality of the previous [upstream_conf](https://nginx.org/en/docs/http/ngx_http_upstream_conf_module.html) and [(extended) status](https://nginx.org/en/docs/http/ngx_http_status_module.html) APIs; it includes a [Swagger](https://demo.nginx.com/swagger-ui/) specification and adds support for [key‑value stores](https://nginx.org/en/docs//http/ngx_http_keyval_module.html) +- New build tool ([download here](https://hg.nginx.org/pkg-oss/raw-file/default/build_module.sh)) that creates installable packages of the many third‑party modules available for NGINX and NGINX Plus +- Ability to gracefully shut down all live client connections when restarting NGINX Plus (the [worker_shutdown_timeout](https://nginx.org/en/docs/ngx_core_module.html#worker_shutdown_timeout) directive) +- Support for adding HTTP trailers (the [add_trailer](https://nginx.org/en/docs/http/ngx_http_headers_module.html#add_trailer) directive) +- Improvement to session persistence: quicker establishment of sticky sessions between clients and upstream groups (the `header` parameter to the [sticky learn](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky) directive) +- Support for the third‑party [HTTP Substitutions Filter](https://github.com/yaoweibin/ngx_http_substitutions_filter_module) module, distributed in NGINX Plus packages and available on the [Dynamic Modules](https://www.nginx.com/products/modules/) page + +NGINX Plus R13 is supported on: + +- Amazon 
Linux 2016.09+ +- CentOS 6.5+, 7.0+ +- Debian 7.0, 8.0, 9.0 +- FreeBSD 10.3, 11.0 +- Oracle Linux 6.5+, 7.0+ +- RHEL 6.5+, 7.0+ +- Ubuntu 14.04 LTS, 16.04 LTS, 17.04 + +**Notes:** + +- CentOS/Oracle Linux/RHEL 5.10+ is no longer supported +- Ubuntu 12.04 LTS and 16.10 are no longer supported +- Ubuntu 17.04 is new in this release +- The `sticky_cookie_insert` directive (deprecated in [NGINX Plus R2](#r2)) has been removed +- The [upstream_conf](https://nginx.org/en/docs/http/ngx_http_upstream_conf_module.html) and [(extended) status](https://nginx.org/en/docs/http/ngx_http_status_module.html) APIs are deprecated by the new [NGINX Plus API](https://nginx.org/en/docs/http/ngx_http_api_module.html) and will be removed in a future release + +More information: [Announcing NGINX Plus R13](https://www.nginx.com/blog/nginx-plus-r13-released) + + +## NGINX Plus Release 12 (R12) +_14 March 2017_
    +_Based on NGINX Open Source 1.11.10_ + +NGINX Plus R12 is a feature release: + +- Synchronization of NGINX Plus configuration across instances in a cluster, from a single primary node (new `nginx_sync` package) +- Updates to Extended Status module [data set](https://nginx.org/en/docs/http/ngx_http_status_module.html#data), including NGINX Plus version (`nginx_build`), usage statistics for shared memory zones (under the `slabs/` subtree), and additional upstream fields (`name`, `service`) +- New statistics displayed on [live activity monitoring dashboard](http://demo.nginx.com/): NGINX Plus version, response time metrics, shared memory zones usage, and server names for upstreams +- [Support](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_use_stale) for the `stale-while-revalidate` and `stale-if-error` extensions to the `Cache-Control` header, as defined by [RFC 5861](https://www.ietf.org/rfc/rfc5861.txt) +- Ability to bypass cache for byte range requests after a specified offset (the [proxy_cache_max_range_offset](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_max_range_offset) directive) +- Length of `Vary` and `ETag` cache headers increased to 128 bytes; note that the on‑disk cache format has changed, so cached content is invalidated after the upgrade and must be refreshed from the origin server +- `mandatory` parameter to the `health_check` directive ([HTTP](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check) and [Stream](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check)) which requires servers newly added to an `upstream` group to pass the associated health check before receiving real traffic +- “Zero config” UDP [health check](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check) which does not require specifying a [match](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#match) block +- Support in the Stream 
module for verification of [client SSL certificates](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_verify_client) for TCP applications +- [SSL variables](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#variables) representing various details about client certificates and capabilities (`$ssl_ciphers`, `$ssl_client_v_end`, `$ssl_client_v_start`, `$ssl_client_v_remain`, and `$ssl_curves`) +- The `$ssl_client_verify` variable includes the reason for failure +- The `$ssl_client_i_dn` and `$ssl_client_s_dn` variables comply with [RFC 2253](https://www.ietf.org/rfc/rfc2253.txt); legacy variants are available as `$ssl_client_i_dn_legacy` and `$ssl_client_s_dn_legacy` +- Support for accessing arbitrary JWT fields as [variables](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html) +- Support for JSON escaping in access logs (the `escape` parameter to the [log_format](https://nginx.org/en/docs/http/ngx_http_log_module.html#log_format) directive) +- WebP support in the [Image-Filter](https://nginx.org/en/docs/http/ngx_http_image_filter_module.html) module. +- Output from the `nginx` `-T` command excludes duplicated sections of configuration +- Improvements to memory usage and performance, including upstream [queue](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#queue) optimization + +NGINX Plus R12 is supported on: + +- Amazon Linux 2016.09+ +- CentOS 5.10+, 6.5+, 7.0+ +- Debian 7.0, 8.0, 9.0 +- FreeBSD 10.3, 11.0 +- Oracle Linux 5.10+, 6.5+, 7.0+ +- RHEL 5.10+, 6.5+, 7.0+ +- SLES 12, 12 SP1 +- Ubuntu 12.04 LTS, 14.04 LTS, 16.04 LTS, 16.10 + +**Notes:** + +- CentOS/Oracle Linux/RHEL 5.10+ will be removed at NGINX Plus R13 +- Debian 9 is new in this release +- FreeBSD 9 is no longer supported +- Ubuntu 12.04 LTS will be removed at NGINX Plus R13 + +More information: [Announcing NGINX Plus R12](https://www.nginx.com/blog/nginx-plus-r12-released/) + +### NGINX Plus R12 Updates + +These are bug‑fix releases for NGINX Plus R12. 
+ +NGINX Plus R12 P3
    +_29 June 2017_ + +- Content caching: Cache response might contain additional internal cache header data + +NGINX Plus R12 P2
    +_30 March 2017_ + +- Live activity monitoring: Response time metric was miscalculated under certain conditions + +NGINX Plus R12 P1
    +_14 March 2017_ + +- Live activity monitoring: Dashboard might hang with certain configurations + + +## NGINX Plus Release 11 (R11) +_25 October 2016_
    +_Based on NGINX Open Source 1.11.5_ + +NGINX Plus R11 is a feature release: + +- Dynamic modules binary compatibility between NGINX Plus and the corresponding version of open source NGINX +- Enhancements to the Stream module: custom [logging](https://nginx.org/en/docs/stream/ngx_stream_log_module.html) with a number of additional [variables](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#variables), [PROXY protocol support](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#listen) for incoming connections, support for [obtaining](https://nginx.org/en/docs/stream/ngx_stream_realip_module.html) real IP address and port from PROXY protocol header, and ability to [extract the server name](https://nginx.org/en/docs/stream/ngx_stream_ssl_preread_module.html) from SNI into a variable for purposes such as custom routing +- Updates to the Extended Status module [data set](https://nginx.org/en/docs/http/ngx_http_status_module.html#data), including additional Stream metrics (`sessions`, `discarded`) +- Cache manager support for iterative operations mode when deleting old cache files, reducing the disk load (see the `manager_files`, `manager_threshold`, and `manager_sleep` parameters of the [proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) directive) +- Support for variables in the `domain` parameter to the [sticky](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky) directive +- New variable `$upstream_bytes_received` for both [Stream](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#var_upstream_bytes_received) and [HTTP](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#var_upstream_bytes_received) + +NGINX Plus R11 is supported on: + +- Amazon Linux 2016.03+ +- CentOS 5.10+, 6.5+, 7.0+ +- Debian 7.0, 8.0 +- FreeBSD 9.3, 10.1+, 11.0 +- Oracle Linux 5.10+, 6.5+, 7.0+ +- RHEL 5.10+, 6.5+, 7.0+ +- SLES 12, 12 SP1 +- Ubuntu 12.04 LTS, 14.04 LTS, 16.04 LTS, 
16.10 + +**Notes:** + +- FreeBSD 11.0 is new in this release +- Ubuntu 16.10 is new in this release +- The `nginx-plus-extras` package is no longer provided; migrate to the `nginx-plus` package and then install the needed [dynamic modules](https://www.nginx.com/products/dynamic-modules) + +More information: [Announcing NGINX Plus R11](https://www.nginx.com/blog/nginx-plus-r11-released/) + + +## NGINX Plus Release 10 (R10) +_23 August 2016_
    +_Based on NGINX Open Source 1.11.3_ + +NGINX Plus R10 is a feature release: + +- New dynamic module: [ModSecurity](https://www.nginx.com/waf) (package name is `nginx-plus-module-modsecurity`) built on an early release of ModSecurity 3.0 +- New dynamic module: nginScript (package name is `nginx-plus-module-njs`) +- Support for client authentication using [JSON Web Tokens (JWT)](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html) +- Enhancements to the [Stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html) module used for TCP/UDP load balancing (more [NGINX variables](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#variables), [resolver](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#resolver) support, [map](https://nginx.org/en/docs/stream/ngx_stream_map_module.html) module, [geo](https://nginx.org/en/docs/stream/ngx_stream_geo_module.html) module, [geoip](https://nginx.org/en/docs/stream/ngx_stream_geoip_module.html) module, and [split_clients](https://nginx.org/en/docs/stream/ngx_stream_split_clients_module.html) A/B testing support) +- Support for dual‑stack RSA/ECC certificates by defining multiple [ssl_certificate](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate) and [ssl_certificate_key](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate_key) directives on the same virtual server +- Support for IP Transparency and Direct Server Return (DSR) using the `transparent` parameter to the [proxy_bind](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_bind) directive. DSR only supported for UDP load balancing. 
+- Support for the `IP_BIND_ADDRESS_NO_PORT` socket option where available, allowing for many more upstream connections (requires Linux kernel 4.2 or later) +- HTTP/2 improvements: support for unbuffered upload, and various bug fixes +- New NGINX variables: [$request_id](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_request_id), [$proxy_protocol_port](https://nginx.org/en/docs/http/ngx_http_core_module.html#var_proxy_protocol_port), [$realip_remote_port](https://nginx.org/en/docs/http/ngx_http_realip_module.html#var_realip_remote_port) +- Modules updated (both in `nginx-plus-extras` and as dynamic modules): + + - [Headers-More](http://github.com/openresty/headers-more-nginx-module) module updated to version 0.31 + - [Lua](http://github.com/openresty/lua-nginx-module) module updated to version 0.10.6 + - [Phusion Passenger Open Source](https://blog.phusion.nl/tag/passenger-releases/) module updated to version 5.0.30 + - [Set-Misc](http://github.com/openresty/set-misc-nginx-module) module updated to version 0.31 + +NGINX Plus R10 is supported on: + +- Amazon Linux 2016.03+ +- CentOS 5.10+, 6.5+, 7.0+ +- Debian 7.0, 8.0 +- FreeBSD 9.3, 10.1+ +- Oracle Linux 5.10+, 6.5+, 7.0+ +- RHEL 5.10+, 6.5+, 7.0+ +- SLES 12, 12 SP1 +- Ubuntu 12.04 LTS, 14.04 LTS, 16.04 LTS + +**Notes:** + +- Ubuntu 15.10 is no longer supported +- NGINX Plus R10 is the last release to include the `nginx-plus-extras` package; if using this package, migrate to the `nginx-plus` package and then install the needed [dynamic modules](https://www.nginx.com/products/dynamic-modules) + +More information: [Announcing NGINX Plus R10](https://www.nginx.com/blog/nginx-plus-r10-released/) + + +## NGINX Plus Release 9 (R9) +_12 April 2016_
    +_Based on NGINX Open Source 1.9.13_ + +NGINX Plus R9 is a feature release: + +- Dynamic loading of modules (both NGINX‑authored and third‑party). The NGINX‑authored modules supported in this release: + - [nginx-plus-module-geoip](https://nginx.org/en/docs/http/ngx_http_geoip_module.html) + - [nginx-plus-module-image-filter](https://nginx.org/en/docs/http/ngx_http_image_filter_module.html) + - [nginx-plus-module-perl](https://nginx.org/en/docs/http/ngx_http_perl_module.html) + - [nginx-plus-module-xslt](https://nginx.org/en/docs/http/ngx_http_xslt_module.html) + + The third‑party modules supported in this release: + + - [nginx-plus-module-headers-more](https://github.com/openresty/headers-more-nginx-module) + - [nginx-plus-module-lua](https://github.com/openresty/lua-nginx-module) + - [nginx-plus-module-passenger](https://www.phusionpassenger.com/) + - [nginx-plus-module-rtmp](https://github.com/arut/nginx-rtmp-module) + - [nginx-plus-module-set-misc](https://github.com/openresty/set-misc-nginx-module) + +- UDP load balancing support, configured in the [stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html) configuration context +- Support for retrieving upstream servers configuration via DNS `SRV` records, configured with the new `service` parameter to the [server](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server) directive +- Automatic retrying of DNS requests over TCP when UDP responses are truncated +- Failed nonidempotent HTTP requests (`POST`, `LOCK`, `PATCH`) are no longer retried with the other servers in the `upstream` group, unless the `non_idempotent` parameter is included in the [proxy_next_upstream](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream) directive +- Improved cache metadata accounting +- Automatic binding of worker processes to available CPUs using the new `auto` parameter of the [worker_cpu_affinity](https://nginx.org/en/docs/ngx_core_module.html#worker_cpu_affinity) 
directive +- Optional offloading of some cache write operations to thread pools, configured with the [aio_write on](https://nginx.org/en/docs/http/ngx_http_core_module.html#aio_write) directive +- Support for customizing the `Server` response header, as well as the signature in standard error messages +- Updated live activity monitoring dashboard +- In the `nginx-plus-extras` package: + - [Headers-More](http://github.com/openresty/headers-more-nginx-module) module updated to version 0.29 + - [Lua](http://github.com/openresty/lua-nginx-module) module updated to version 0.10.2 + - [Phusion Passenger Open Source](https://blog.phusion.nl/tag/passenger-releases/) module updated to version 5.0.26 + +NGINX Plus R9 is supported on: + +- Amazon Linux 2016.03+ +- CentOS 5.10+, 6.5+, 7.0+ +- Debian 7.0, 8.0 +- FreeBSD 9.3, 10.1+ +- Oracle Linux 5.10+, 6.5+, 7.0+ +- RHEL 5.10+, 6.5+, 7.0+ +- SLES 12, 12 SP1 +- Ubuntu 12.04 LTS, 14.04 LTS, 15.10, 16.04 LTS + +**Note:** + +- Ubuntu 15.04 is no longer supported. + +More information: [Announcing NGINX Plus R9](https://www.nginx.com/blog/nginx-plus-r9-released/) + +### NGINX Plus R9 Updates + +This is a bug‑fix release for NGINX Plus R9. + +NGINX Plus R9 P1
    +_25 May 2016_ + +- Segmentation fault might occur when writing a client request body to a temporary file +- Specially crafted request might cause NGINX worker process to crash due to a NULL pointer dereference ([CVE-2016-4450](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-4450)) + + +## NGINX Plus Release 8 (R8) +_19 January 2016_
    +_Based on NGINX Open Source 1.9.9_ + +NGINX Plus R8 is a feature release: + +- [OAuth Technology Preview](https://www.nginx.com/blog/oauth-technology-preview/), which performs OAuth 2.0 processing for proxied applications +- Improved [HTTP/2](https://nginx.org/en/docs/http/ngx_http_v2_module.html) implementation now included in the `nginx-plus` and `nginx-plus-extras` packages; the `nginx-plus-http2` package is deprecated +- Caching improvements, including support for caching [HEAD](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_convert_head) requests and more effective caching of large files with the [Cache Slice](https://nginx.org/en/docs/http/ngx_http_slice_module.html) module +- Changes to upstream groups made with the [on‑the‑fly reconfiguration API](https://nginx.org/en/docs/http/ngx_http_upstream_conf_module.html) can now be configured to [persist](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#state) across restarts and configuration reloads +- Support for sending health check requests to a specified port (the `port` parameter to the [health_check](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check) directive) +- Enhancement to the [Real IP](https://nginx.org/en/docs/http/ngx_http_realip_module.html) module: the new `$realip_remote_addr` variable represents the original client IP address +- Enhancement to [syslog](https://nginx.org/en/docs/syslog.html) logging: the `nohostname` parameter disables logging of the hostname field, which is unnecessary when logging to a local `syslog` server +- Updated live activity monitoring dashboard +- In the `nginx-plus-extras` package: + - [Headers-More](https://github.com/openresty/headers-more-nginx-module) module updated to version 0.28 + - [Lua](https://github.com/openresty/lua-nginx-module) module updated to version 0.9.20 + - [Phusion Passenger Open Source](https://blog.phusion.nl/tag/passenger-releases/) module updated to version 5.0.22 + - 
[Redis](https://github.com/openresty/lua-resty-redis) module for Lua access updated to version 0.21 + +NGINX Plus R8 is supported on: + +- Amazon Linux +- CentOS 5.10+, 6.5+, 7.0 +- Debian 7.0, 8.0 +- FreeBSD 9.3, 10.1+ +- Oracle Linux 5.10+, 6.5+, 7.0 +- RHEL 5.10+, 6.5+, 7.0 +- SLES 12, 12 SP1 +- Ubuntu 12.04 LTS, 14.04 LTS, 15.04, 15.10 + +NGINX Plus R8 does not include the `nginx-plus-lua` package; if you previously used this package, migrate to the `nginx-plus-extras` package + +More information: [Announcing NGINX Plus R8](https://www.nginx.com/blog/nginx-plus-r8-released/) + +### NGINX Plus R8 Updates + +These are bug‑fix releases for NGINX Plus R8. + +NGINX Plus R8 P3
    +_24 February 2016_ + +- HTTP/2: `client_body_timeout` directive was not handled correctly + +NGINX Plus R8 P2
    +_11 February 2016_ + +- Logging: Buffer over‑read might occur while logging invalid request headers +- HTTP/2: Various fixes + +NGINX Plus R8 P1
    +_26 January 2016_ + +- Resolver: Limit `CNAME` resolutions to prevent remote attackers from causing a denial of service ([CVE-2016-0747](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-0747)) + + +## NGINX Plus Release 7 (R7) +_15 September 2015_
    +_Based on NGINX Open Source 1.9.4_ + +NGINX Plus R7 is a feature release: + +- Support for HTTP/2 in the new `nginx-plus-http2` package (the `nginx-plus` and `nginx-plus-extras` packages continue to support SPDY) + + **Note:** Before installing the `nginx-plus-http2` package, you must remove the `spdy` parameter on all `listen` directives in your configuration (replace it with the `http2` and `ssl` parameters to enable support for HTTP/2). NGINX Plus fails to start if any `listen` directives have the `spdy` parameter. + +- Support for proxying [NTLM](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ntlm) requests +- Enhancements to [TCP load balancing](https://nginx.org/en/docs/stream/ngx_stream_core_module.html) and proxying: + - [Access controls](https://nginx.org/en/docs/stream/ngx_stream_access_module.html) + - [Connection limiting](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html) + - Bandwidth limiting for [upload](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_upload_rate) and [download](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_download_rate) + - Client‑side [PROXY protocol](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_protocol) support + - Ability to [set local IP address](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_bind) of origin for outgoing connections + - New `backlog` parameter to [listen](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#listen) directive to limit size of queue of pending connections + - New [tcp_nodelay](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#tcp_nodelay) directive to control use of OS `TCP_NODELAY` option +- More efficient distribution of connections across NGINX Plus worker processes (new `reuseport` parameter to the [listen](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen) directive) +- [Thread 
pools](https://nginx.org/en/docs/ngx_core_module.html#thread_pool) for multithreaded reading and sending of files without blocking worker processes +- Live activity monitoring dashboard redesigned to use tabs +- Additional live activity monitoring metrics in the [Status](https://nginx.org/en/docs/http/ngx_http_status_module.html#compatibility) module (dataset version 6) +- Additional arguments to playlist and fragment URIs in the [HLS](https://nginx.org/en/docs/http/ngx_http_hls_module.html) module (`start`, `end`, and `offset`) +- New `-T` flag on `nginx` command to dump the configuration to standard output in a standardized format +- New [$upstream_connect_time](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#var_upstream_connect_time) variable to capture the connection time to upstream servers +- [sub_filter](https://nginx.org/en/docs/http/ngx_http_sub_module.html#sub_filter) directive now supports variables in both the string being replaced and the replacement string; multiple `sub_filter` directives can appear at a configuration level +- In the `nginx-plus-extras` package: + - New [Redis](http://github.com/openresty/lua-resty-redis) module for access to Redis databases through Lua + - [Headers-More](http://github.com/openresty/headers-more-nginx-module) module updated to version 0.26 + - [Lua](http://github.com/openresty/lua-nginx-module) module updated to version 0.9.16 + - [Phusion Passenger Open Source](https://blog.phusion.nl/tag/passenger-releases/) module updated to version 5.0.15 + - [Set-Misc](http://github.com/openresty/set-misc-nginx-module) module updated to version 0.29 + +NGINX Plus R7 is supported on: + +- CentOS 5.10+, 6.5+, 7.0+ +- Debian 7.0, 8.0 +- FreeBSD 9.3, 10.1+ +- Oracle Linux 5.10+, 6.5+, 7.0+ +- RHEL 5.10+, 6.5+, 7.0+ +- SLES 12 +- Ubuntu 12.04 LTS, 14.04 LTS, 15.04 + +**Notes:** + +- Debian 6.0 is no longer supported +- SLES 11 SP3 is no longer supported +- Ubuntu 10.04 LTS and 14.10 are no longer supported +- The 
`nginx-plus-extras` package has additional dependencies +- NGINX Plus R7 is the last release that includes the `nginx-plus-lua` package; customers using the package will have to migrate to the `nginx-plus-extras` package in NGINX Plus R8 + +More information and important upgrade information for users of the Phusion Passenger Open Source module: [Announcing NGINX Plus Release 7](https://www.nginx.com/blog/nginx-plus-r7-released/) + + +## NGINX Plus Release 6 (R6) +_14 April 2015_
    +_Based on NGINX Open Source 1.7.11_ + +NGINX Plus R6 is a feature release: + +- TCP proxy enhancements (health checks, dynamic reconfiguration, SSL support, logging, status counters) +- New Least-Time load‑balancing algorithm +- Support for unbuffered upload ([proxy_request_buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering) directive) +- Proxy SSL authentication support for HTTP and uwsgi +- Proxy cache enhancements (variables in value of [proxy_cache](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache) directive, new `use_temp_path` parameter to [proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) directive) +- Mail proxy supports client SSL certificates +- Enhancement to Autoindex module (new [autoindex_format](https://nginx.org/en/docs/http/ngx_http_autoindex_module.html#autoindex_format) directive) +- New live activity monitoring dashboard +- In the `nginx-plus-extras` package: + - [Lua](https://github.com/openresty/lua-nginx-module) module updated to version 0.9.16rc1 + - [Phusion Passenger Open Source](https://blog.phusion.nl/tag/passenger-releases/) module updated to version 4.0.59 + - [Set-Misc](http://github.com/openresty/set-misc-nginx-module) module updated to version 0.28 + +NGINX Plus R6 is supported on: + +- CentOS 5.10+, 6.5+, 7.0 +- Debian 6.0, 7.0, 8.0 +- FreeBSD 9.3, 10.1 +- Oracle Linux 5.10+, 6.5+, 7.0 +- RHEL 5.10+, 6.5+, 7.0 +- SLES 11 SP3, 12 +- Ubuntu 10.04 LTS, 12.04 LTS, 14.04 LTS, 14.10 + +The `nginx-plus-extras` package has additional dependencies. + +More information: [Announcing NGINX Plus Release 6 with Enhanced Load Balancing, High Availability, and Monitoring Features](https://www.nginx.com/blog/nginx-plus-r6-released/) + + +## NGINX Plus Release 5 (R5) +_2 December 2014_
    +_Based on NGINX Open Source 1.7.7_ + +NGINX Plus R5 is a feature release: + +- Proxying and load balancing of raw TCP traffic (the [Stream](https://nginx.org/en/docs/stream/ngx_stream_core_module.html) module) +- Sticky session timeout now applies from the most recent request in the session +- Upstream “draining” can be used to remove an upstream server without interrupting any user sessions (new `drain` parameter to the [upstream_conf](https://nginx.org/en/docs/http/ngx_http_upstream_conf_module.html#upstream_conf) directive) +- Improved control over request retries in the event of failure, based on number of tries and time; also available for FastCGI, memcached, SCGI, and uwsgi modules +- `Vary` field in response header is correctly handled for caching (multiple variants of the same resource can be cached); note that the on‑disk cache format has changed, so upgrading to R5 invalidates cached content +- Improved caching support for byte‑range requests +- Control of upstream bandwidth (new [proxy_limit_rate](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_limit_rate) directive) +- In the `nginx-plus-extras` package: + - [Lua](https://github.com/openresty/lua-nginx-module) module updated to version 0.9.13 + - [Phusion Passenger Open Source](https://blog.phusion.nl/tag/passenger-releases/) module updated to version 4.0.53 +- In the nginx-plus-lua package: + - [Lua](https://github.com/openresty/lua-nginx-module) module updated to version 0.9.13 + +NGINX Plus R5 is supported on: + +- CentOS 5.9, 6.5, 7.0 +- Debian 6.0, 7.0 +- FreeBSD 9.3, 10.0 +- Oracle Linux 5.10+, 6.5+, 7.0 +- RHEL 5.9, 6.5, 7.0 +- SLES 11 SP3, 12 +- Ubuntu 10.04 LTS, 12.04 LTS, 14.04 LTS, 14.10 + +The `nginx-plus-extras` and `nginx-plus-lua` packages have additional dependencies. + +More information: [NGINX Plus R5 Released](https://www.nginx.com/blog/nginx-plus-r5-released/) + + +## NGINX Plus Release 4 (R4) +_24 July 2014_
    +_Based on NGINX Open Source 1.7.3_ + +NGINX Plus R4 is a feature release: + +- Ability to verify backend SSL certificates +- Support for SNI while working with SSL backends +- Passphrases for SSL private keys can now be stored in an external file +- New load‑balancing method based on user‑defined keys with optional consistency ([hash](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#hash) directive) +- New session affinity mechanism (sticky learn) based on server‑initiated sessions +- Cache revalidation now uses `If-None-Match` header field when possible +- Conditional logging for requests (new `if` parameter to the [access_log](https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log) directive) +- Ability to retrieve a subset of the live activity monitoring data +- [MP4](https://nginx.org/en/docs/http/ngx_http_mp4_module.html) module now supports the `end` argument in request URIs, which sets the end point of playback +- In the `nginx-plus-extras` package: + - [Lua](https://github.com/openresty/lua-nginx-module) module updated to version 0.9.10 + - [Phusion Passenger Open Source](https://blog.phusion.nl/tag/passenger-releases/) module updated to version 4.0.45 +- In the nginx-plus-lua package: + - [Lua](https://github.com/openresty/lua-nginx-module) module updated to version 0.9.10 + +NGINX Plus R4 is supported on: + +- CentOS 5.9, 6.5, 7.0 +- Debian 6.0, 7.0 +- FreeBSD 9.2, 10.0 +- Oracle Linux 5.10+, 6.5+, 7.0 +- RHEL 5.9, 6.5, 7.0 +- SLES 11 SP3 +- Ubuntu 10.04 LTS, 12.04 LTS, 14.04 LTS + +The `nginx-plus-extras` and `nginx-plus-lua` packages have additional dependencies. + +More information: [NGINX Plus R4 Released](https://www.nginx.com/blog/nginx-plus-r4-released/) + + +## NGINX Plus Release 3 (R3) +_2 April 2014_
    +_Based on NGINX Open Source 1.5.12‑1_ + +NGINX Plus R3 is a feature release: + +- Automatic re‑resolution of hostnames in upstream groups allows group members to be updated on‑the‑fly using DNS +- New connection limits and an internal connection queue protect servers from connection overload and improve connection scheduling by NGINX Plus’ load balancing +- Support for PROXY protocol +- SPDY support updated to comply with draft 3.1 +- Additional controls over SSL have been added to control the use of session tickets and reduce time to first byte +- Support for IPv6 DNS resolution + +NGINX Plus R3 is supported on: + +- CentOS 5.9, 6.5 +- Debian 6.0, 7.0 +- FreeBSD 9.2, 10.0 +- Oracle Linux 5.10+, 6.5+, 7.0 +- RHEL 5.9, 6.5 +- SLES 11 SP3 +- Ubuntu 10.04 LTS, 12.04 LTS, 12.10, 13.10, 14.04 LTS + +The `nginx-plus-extras` and `nginx-plus-lua` packages have additional dependencies. + +More information: [NGINX Plus R3 Released](https://www.nginx.com/blog/nginx-plus-r3-released/) + + +## NGINX Plus Release 2 (R2) +_12 December 2013_
    +_Based on NGINX Open Source 1.5.7‑1_ + +NGINX Plus R2 is a feature release: + +- Enhanced sticky routing support +- Additional status metrics for virtual hosts and cache zones +- Cache purge support (also available for FastCGI) +- Support for cache revalidation +- Support for authorization based on the result of a subrequest (new [ngx_http_auth_request_module](https://nginx.org/en/docs/http/ngx_http_auth_request_module.html) module) + +### NGINX Plus R2 Updates + +Security Update to NGINX Plus Release R2 +_21 March 2014_
    +_Based on NGINX Open Source 1.5.7‑4_ + +- Fixes vulnerability in experimental SPDY implementation in NGINX Open Source 1.5.7‑3 and earlier. + +Functional Update to NGINX Plus R2 +_5 March 2014_
    +_Based on NGINX Open Source 1.5.7‑3_ + +- NGINX Plus now correctly applies the value set with the [client_max_body_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size) directive when processing HTTP requests that contain chunk‑encoded body data. + +Functional Update to NGINX Plus R2 +_13 February 2014_
    +_Based on NGINX Open Source 1.5.7‑2_ + +- Updates to MP4 and HLS streaming functionality +- Fix for premature closing of connections when using SPDY with proxy cache +- Updates to implementation of SPDY/2 +- Added **status.html** file for live activity monitoring, missing from some packages + + +## NGINX Plus Initial Release (R1) +_22 August 2013_
    +_Based on NGINX Open Source 1.5.3‑1_ + +NGINX Plus is the fully supported, commercial version of NGINX. It includes most NGINX open source modules and adds further features: + +- Application health checks +- Live activity monitoring (implemented in the Extended Status module) +- Advanced load balancing +- On‑the‑fly reconfiguration of load‑balanced upstream groups +- Extended logging capabilities +- High availability setup +- Adaptive media streaming diff --git a/content/nginx/technical-specs.md b/content/nginx/technical-specs.md new file mode 100644 index 000000000..51401b3fa --- /dev/null +++ b/content/nginx/technical-specs.md @@ -0,0 +1,205 @@ +--- +description: Platforms supported by F5 NGINX Plus and dynamically loaded modules, supported + SSL/TLS versions, supported deployment environments, and list of modules built into + NGINX Plus. +docs: DOCS-473 +doctypes: +- concept +title: Technical Specs +toc: true +weight: 400 +--- + +NGINX Plus is available only as a binary; it is not distributed as source code. For additional platforms and modules, [contact us](https://www.f5.com/products/get-f5). + +## Supported Distributions {#supported-distributions} + +{{}} +| Distribution | Supported on R33 | Supported on R32 | +|-------------------------------------|-----------------------------------------------|-----------------------------------------------| +| AlmaLinux | 8 (x86_64, aarch64)
    9 (x86_64, aarch64) | 8 (x86_64, aarch64)
    9 (x86_64, aarch64) | +| Alpine Linux | 3.17 (x86_64, aarch64) **(deprecated)**
    3.18 (x86_64, aarch64)
    3.19 (x86_64, aarch64)
    3.20 (x86_64, aarch64) **(new)** | 3.16 (x86_64, aarch64) **(deprecated)**
    3.17 (x86_64, aarch64)
    3.18 (x86_64, aarch64)
    3.19 (x86_64, aarch64) | +| Amazon Linux | 2023 (x86_64, aarch64) | 2023 (x86_64, aarch64) | +| Amazon Linux 2 | LTS (x86_64, aarch64) | LTS (x86_64, aarch64) | +| CentOS | **Not supported** | 7.4+ (x86_64) **(deprecated)** | +| Debian | 11 (x86_64, aarch64)
    12 (x86_64, aarch64) | 11 (x86_64, aarch64)
    12 (x86_64, aarch64) | +| FreeBSD | 13 (amd64)
    14 (amd64) | 13 (amd64)
    14 (amd64) | +| Oracle Linux | 8.1+ (x86_64, aarch64)
    9 (x86_64) | 7.4+ (x86_64) **(deprecated)**
    8.1+ (x86_64, aarch64)
    9 (x86_64) | +| Red Hat Enterprise Linux (RHEL) | 8.1+ (x86_64, aarch64)
    9.0+ (x86_64, aarch64) | 7.4+ (x86_64) **(deprecated)**
    8.1+ (x86_64, aarch64)
    9.0+ (x86_64, aarch64) | +| Rocky Linux | 8 (x86_64, aarch64)
    9 (x86_64, aarch64) | 8 (x86_64, aarch64)
    9 (x86_64, aarch64) | +| SUSE Linux Enterprise Server (SLES) | 12 SP5 (x86_64) **(deprecated)**
    15 SP2+ (x86_64) | 12 SP5 (x86_64)
    15 SP2+ (x86_64) | +| Ubuntu | 20.04 LTS (x86_64, aarch64)
    22.04 LTS (x86_64, aarch64)
    24.04 LTS (x86_64, aarch64) | 20.04 LTS (x86_64, aarch64)
    22.04 LTS (x86_64, aarch64)
24.04 LTS (x86_64, aarch64) **(new)** | +{{
    }} + +--- + +## Dynamic Modules + +Dynamic modules are supported on the [same distributions as NGINX Plus](#supported-distributions), unless noted otherwise in the table below. + +{{}} +| Module | Distribution and details | +|-------------------|-----------------------------------------------------------------------------------------------------------| +| AppProtect | AlmaLinux/Rocky Linux: **Not supported**
    Alpine Linux: **Not supported**
    Amazon Linux 2: **x86_64 only**
    Amazon Linux 2023: **Not supported**
    Debian 11: **x86_64 only**
    FreeBSD: **Not supported**
    Oracle Linux 8: **x86_64 only**
    RHEL 8: **x86_64 only**
    SLES: **Not supported**
    Ubuntu 20.04: **x86_64 only** | +| Brotli | SLES 12: **Not supported** | +| GeoIP | RHEL/Oracle Linux/AlmaLinux/Rocky Linux 8.0+, 9: **Not supported**
    FreeBSD: **Not supported** | +| GeoIP2 | SLES 12: **Not supported**
    Amazon Linux 2: **Not supported** | +| HA-Keepalived | FreeBSD: **Not supported**
    Alpine Linux: **Not supported**
    Amazon Linux 2: **Not supported**
    Amazon Linux 2023: **Not supported** | +| NGINX sync | FreeBSD: **Not supported**
    Alpine Linux: **Not supported** | +| OpenTelemetry | Amazon Linux 2: **Not supported**
    SLES: **Not supported** | +| OpenTracing | SLES 12: **Not supported** | +{{
    }} + +--- + +## Supported SSL/TLS versions + +NGINX Plus supports the following SSL/TLS protocols: +- SSLv2 +- SSLv3 +- TLSv1 +- TLSv1.1 +- TLSv1.2 +- TLSv1.3 + +You can configure which protocols to enable or disable with the [ssl_protocols](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols) directive. + +TLSv1.2 and earlier are supported on all operating systems listed in [Supported Distributions](#supported-distributions). + +TLSv1.3 is supported starting in NGINX Plus R17 and is enabled by default in NGINX Plus R29 and later. It requires OpenSSL 1.1.1 or higher. Note that not all operating systems supported by NGINX Plus include OpenSSL 1.1.1. Check your operating system's documentation to confirm TLSv1.3 compatibility. + +--- + +## Supported Deployment Environments + +- Bare metal +- Container +- Public cloud: AWS, Google Cloud Platform, Microsoft Azure +- Virtual machine + +--- + +## Recommended Hardware +See [Sizing Guide for Deploying NGINX Plus on Bare Metal Servers](https://www.nginx.com/resources/datasheets/nginx-plus-sizing-guide/) + +--- + +## Modules in the NGINX Plus Package + +### Core + +- [Core](https://nginx.org/en/docs/ngx_core_module.html) – Control basic functioning (mutexes, events, thread pools, workers, and so on) + +### Clustering + +- [Zone Sync](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html) – Synchronize [shared memory zones](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#zone) among nodes in a cluster + +### HTTP Core + +- [HTTP Core](https://nginx.org/en/docs/http/ngx_http_core_module.html) – Process HTTP traffic +- [Addition](https://nginx.org/en/docs/http/ngx_http_addition_module.html) – Prepend and append data to a response +- [Auto Index](https://nginx.org/en/docs/http/ngx_http_autoindex_module.html) – Generate directory listings +- [Charset](https://nginx.org/en/docs/http/ngx_http_charset_module.html) – Add character set in `Content-Type` field of HTTP response header, 
and define or convert between character sets +- [Empty GIF](https://nginx.org/en/docs/http/ngx_http_empty_gif_module.html) – Generate empty image response +- [Gunzip](https://nginx.org/en/docs/http/ngx_http_gunzip_module.html) – Decompress responses for clients that don’t support compression +- [Gzip](https://nginx.org/en/docs/http/ngx_http_gzip_module.html) – Use GZIP to compress HTTP responses +- [Gzip Static](https://nginx.org/en/docs/http/ngx_http_gzip_static_module.html) – Serve pre-compressed files from disk +- [Headers](https://nginx.org/en/docs/http/ngx_http_headers_module.html) – Add fields to HTTP response headers, including `Cache-Control` and `Expires` +- [Index](https://nginx.org/en/docs/http/ngx_http_index_module.html) – Specify index files used in directory requests +- [Internal Redirect](https://nginx.org/en/docs/http/ngx_http_internal_redirect_module.html) – Allow internal redirects after checking request or connection processing limits, and access limits +- [Random Index](https://nginx.org/en/docs/http/ngx_http_random_index_module.html) – Select random index file for directory request +- [Real IP](https://nginx.org/en/docs/http/ngx_http_realip_module.html) – Determine true origin IP address for proxied traffic +- [SSI](https://nginx.org/en/docs/http/ngx_http_ssi_module.html) – Process Server Side Includes (SSI) commands +- [User ID](https://nginx.org/en/docs/http/ngx_http_userid_module.html) – Set cookies that uniquely identify clients +- [WebDAV](https://nginx.org/en/docs/http/ngx_http_dav_module.html) – Implement WebDAV file management + +### HTTP Access Control and Authentication + +- [Access](https://nginx.org/en/docs/http/ngx_http_access_module.html) – Control access based on client IP address (support access control lists [ACLs]) +- [Auth Basic](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html) – Implement HTTP Basic Authentication scheme +- [Auth JWT](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html) – Validate 
JSON Web Tokens +- [Auth Request](https://nginx.org/en/docs/http/ngx_http_auth_request_module.html) – Determine client authorization using subrequests to external authentication server +- [Referer](https://nginx.org/en/docs/http/ngx_http_referer_module.html) – Control access based on `Referer` field in HTTP request header +- [Secure Link](https://nginx.org/en/docs/http/ngx_http_secure_link_module.html) – Process encrypted, time-limited links to content + +### HTTP Advanced Configuration + +- [Browser](https://nginx.org/en/docs/http/ngx_http_browser_module.html) – Create variables based on `User-Agent` field in HTTP request header +- [Cache Slice](https://nginx.org/en/docs/http/ngx_http_slice_module.html) – Create byte-range segments of large files, for more efficient caching +- [Geo](https://nginx.org/en/docs/http/ngx_http_geo_module.html) – Create variables based on client IP address +- [Map](https://nginx.org/en/docs/http/ngx_http_map_module.html) – Create variables based on other variables in requests +- [Rewrite](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html) – Test and change URI of request +- [Split Clients](https://nginx.org/en/docs/http/ngx_http_split_clients_module.html) – Partition clients for A/B testing +- [Sub](https://nginx.org/en/docs/http/ngx_http_sub_module.html) – Replace text string in response (rewrite content) + +### HTTP Logging + +- [Log](https://nginx.org/en/docs/http/ngx_http_log_module.html) – Log HTTP transactions locally or to `syslog` +- [Session Log](https://nginx.org/en/docs/http/ngx_http_session_log_module.html) – Log HTTP transactions aggregated per session + +### HTTP Media Delivery + +- [F4F](https://nginx.org/en/docs/http/ngx_http_f4f_module.html) – Stream HDS (Adobe HTTP Dynamic Streaming; filename extensions **.f4f**, **.f4m**, **.f4x**) +- [FLV](https://nginx.org/en/docs/http/ngx_http_flv_module.html) – Stream FLV (Flash Video; filename extension **.flv**) +- 
[HLS](https://nginx.org/en/docs/http/ngx_http_hls_module.html) – Stream HLS (Apple HTTP Live Streaming; filename extensions **.m3u8**, **.ts**) dynamically generated from MP4 or MOV (filename extensions **.m4a**, **.m4v**, **.mov**, **.mp4**, and **.qt**) +- [MP4](https://nginx.org/en/docs/http/ngx_http_mp4_module.html) – Stream MP4 (filename extensions **.m4a**, **.m4v**, **.mp4**) +- Streaming of RTMP and DASH is provided by the third-party [RTMP](https://github.com/arut/nginx-rtmp-module) module + +### HTTP Proxying + +- [FastCGI](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html) – Proxy and cache requests to FastCGI server +- [gRPC](https://nginx.org/en/docs/http/ngx_http_grpc_module.html) – Proxy requests to gRPC server +- [Memcached](https://nginx.org/en/docs/http/ngx_http_memcached_module.html) – Proxy requests to memcached server +- [Mirror](https://nginx.org/en/docs/http/ngx_http_mirror_module.html) – Send copy of requests to one or more additional servers +- [Proxy](https://nginx.org/en/docs/http/ngx_http_proxy_module.html) – Proxy and cache requests to HTTP server +- [SCGI](https://nginx.org/en/docs/http/ngx_http_scgi_module.html) – Proxy and cache requests to SCGI server +- [Upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html) – Proxy and cache requests to load-balanced pool of servers +- [Upstream Health Checks](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html) – Verify servers in load-balanced pool are operational +- [uwsgi](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html) – Proxy and cache requests to uwsgi server + +### HTTP Transaction Shaping + +- [Limit Connections](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html) – Limit concurrent connections from a client IP address or other keyed value +- [Limit Requests](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html) – Limit rate of request processing for a client IP address or other keyed value +- [Limit 
Responses](https://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate) – Limit rate of responses per client connection + +### HTTP/2 and SSL/TLS + +- [HTTP/2](https://nginx.org/en/docs/http/ngx_http_v2_module.html) – Process HTTP/2 traffic +- [SSL/TLS](https://nginx.org/en/docs/http/ngx_http_ssl_module.html) – Process HTTPS traffic + +### Mail + +- [Mail Core](https://nginx.org/en/docs/mail/ngx_mail_core_module.html) – Proxy mail traffic +- [Auth HTTP](https://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html) – Offload authentication processing from HTTP server +- [IMAP](https://nginx.org/en/docs/mail/ngx_mail_imap_module.html) – Implement capabilities and authentication methods for IMAP +- [POP3](https://nginx.org/en/docs/mail/ngx_mail_pop3_module.html) – Implement authentication methods for POP3 traffic +- [Proxy](https://nginx.org/en/docs/mail/ngx_mail_proxy_module.html) – Support proxy-related parameters for mail protocols +- [SMTP](https://nginx.org/en/docs/mail/ngx_mail_smtp_module.html) – Define accepted SASL authentication methods for SMTP clients +- [SSL/TLS](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html) – Implement SSL, STARTTLS, and TLS for mail protocols + +### Programmability and Monitoring + +- [NGINX Plus API](https://nginx.org/en/docs/http/ngx_http_api_module.html) – Provide REST API for accessing metrics, configuring upstream server groups dynamically, and managing key-value pairs, without the need to reload NGINX configuration +- [Key-Value Store](https://nginx.org/en/docs/http/ngx_http_keyval_module.html) – Create variables with values taken from key-value pairs managed by the [NGINX Plus API](https://nginx.org/en/docs/http/ngx_http_api_module.html#http_keyvals_) +- [Management](https://nginx.org/en/docs/ngx_mgmt_module.html) – Configure licensing and usage reporting of NGINX Plus installation to F5 licensing endpoint or [NGINX Instance Manager]({{< relref "nim/index.md" >}}) + +### TCP and UDP Proxying and Load Balancing 
+ +- [Stream](https://nginx.org/en/docs/stream/ngx_stream_module.html) – Process TCP and UDP traffic +- [Access](https://nginx.org/en/docs/stream/ngx_stream_access_module.html) – Support IP-based access control lists (ACLs) +- [Geo](https://nginx.org/en/docs/stream/ngx_stream_geo_module.html) – Create variables based on client IP address +- [Limit Conn](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html) – Limit concurrent connections by key +- [Log](https://nginx.org/en/docs/stream/ngx_stream_log_module.html) – Log TCP and UDP transactions +- [Map](https://nginx.org/en/docs/stream/ngx_stream_map_module.html) – Create variables based on other variables in requests +- [MQTT Preread](https://nginx.org/en/docs/stream/ngx_stream_mqtt_preread_module.html) – Forward MQTT traffic without processing it +- [MQTT Filter](https://nginx.org/en/docs/stream/ngx_stream_mqtt_filter_module.html) – Process Message Queuing Telemetry Transport protocol (MQTT) protocol +- [Proxy](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html) – Proxy requests to TCP and UDP servers +- [Pass](https://nginx.org/en/docs/stream/ngx_stream_pass_module.html) – Pass any accepted client connection to any configured listening socket in http, stream, mail, and other similar modules +- [Real IP](https://nginx.org/en/docs/stream/ngx_stream_realip_module.html) – Determine true origin IP address for proxied traffic +- [Return](https://nginx.org/en/docs/stream/ngx_stream_return_module.html) – Return specified value to client and close connection +- [Split Clients](https://nginx.org/en/docs/stream/ngx_stream_split_clients_module.html) – Partition clients for A/B testing +- [SSL/TLS](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html) – Process TCP traffic secured with SSL/TLS +- [SSL/TLS Preread](https://nginx.org/en/docs/stream/ngx_stream_ssl_preread_module.html) – Forward TCP traffic secured with SSL/TLS without decrypting it +- 
[Upstream](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html) – Proxy and cache traffic to load-balanced pool of servers +- [Upstream Health Checks](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html) – Verify servers in load-balanced pool are operational diff --git a/content/nim/_index.md b/content/nim/_index.md new file mode 100644 index 000000000..ba5824b5b --- /dev/null +++ b/content/nim/_index.md @@ -0,0 +1,8 @@ +--- +title: NGINX Instance Manager +description: Track and control NGINX Open Source and NGINX Plus instances. +url: /nginx-instance-manager/ +cascade: + logo: "NGINX-Instance-Manager-product-icon.png" +--- + diff --git a/content/nim/admin-guide/_index.md b/content/nim/admin-guide/_index.md new file mode 100644 index 000000000..5dafe4707 --- /dev/null +++ b/content/nim/admin-guide/_index.md @@ -0,0 +1,5 @@ +--- +title: Platform administration +weight: 40 +url: /nginx-instance-manager/admin-guide/ +--- \ No newline at end of file diff --git a/content/nim/admin-guide/authentication/_index.md b/content/nim/admin-guide/authentication/_index.md new file mode 100644 index 000000000..021e30b44 --- /dev/null +++ b/content/nim/admin-guide/authentication/_index.md @@ -0,0 +1,6 @@ +--- +title: Authentication +weight: 100 +url: /nginx-management-suite/admin-guide/authentication/ + +--- diff --git a/content/nim/admin-guide/authentication/basic-auth/_index.md b/content/nim/admin-guide/authentication/basic-auth/_index.md new file mode 100644 index 000000000..28093e658 --- /dev/null +++ b/content/nim/admin-guide/authentication/basic-auth/_index.md @@ -0,0 +1,5 @@ +--- +title: Basic auth +weight: 5000 +url: /nginx-instance-manager/admin-guide/authentication/basic-auth/ +--- diff --git a/content/nim/admin-guide/authentication/basic-auth/set-up-basic-authentication.md b/content/nim/admin-guide/authentication/basic-auth/set-up-basic-authentication.md new file mode 100644 index 000000000..697b4dbae --- /dev/null +++ 
b/content/nim/admin-guide/authentication/basic-auth/set-up-basic-authentication.md @@ -0,0 +1,103 @@ +--- +title: Set up basic authentication +description: Learn how to manage user access in NGINX Instance Manager using basic authentication with NGINX as a front-end proxy. This guide covers first-time login, creating additional users, and setting passwords. +toc: true +weight: 10 +type: how-to +product: NIM +docs: DOCS-792 +--- + + + +## Overview + +NGINX Instance Manager uses NGINX as a front-end proxy and for managing user access. By default, NGINX Instance Manager uses basic authentication, requiring you to send your username and password with each request to confirm your identity. When logging in for the first time, use the default `admin` account and password. After that, you can create additional user accounts. Instructions for adding users and setting passwords are provided below. + +{{< call-out "warning" "Security consideration" >}} While convenient, basic authentication is less secure than other methods: credentials are sent as base64-encoded text, which is not a secure encryption method. If your data is intercepted, the encoding can be easily reversed. If you're using NGINX Plus for your front-end proxy, consider [switching to OpenID Connect (OIDC) for authentication]({{< relref "/nim/admin-guide/authentication/oidc/getting-started.md" >}}). For production environments, we strongly recommend OIDC.{{< /call-out >}} + +## Default admin user + +When you install NGINX Instance Manager, a default `admin` user is created with a randomly generated password that is shown in the installation output. + +You can change the default `admin` password by running the provided script or by manually editing the `/etc/nms/nginx/.htpasswd` file. For instructions, see the [Set user passwords](#set-basic-passwords) section below. 
+ +The `admin` user is associated with an [admin role]({{< relref "/nim/admin-guide/rbac/overview-rbac.md" >}}) that grants full permissions for all modules and features. You can delete the `admin` user, but only after assigning the admin role to another user. The admin role cannot be deleted and must always be assigned to at least one user. + +## Create new users {#create-users} + +{{< note >}} Please note that the web interface does not support adding user passwords directly. Once you've created new users, refer to the following steps to [set user passwords](#set-basic-passwords).{{< /note >}} + +To add users, take the following steps: + +1. In a web browser, go to the FQDN for your NGINX Instance Manager host and log in. +1. Select the **Settings** (gear) icon in the upper-right corner. +1. On the left menu, select **Users**. +1. Select **Create**. +1. On the **Create User** form, enter the details for the user: + + - **Username**: A unique username to identify the user. + - **Email**: The user's email address. + - **First Name**: The user's first name. + - **Last Name**: The user's last name. + - **Description**: An optional brief description of the user. + +1. In the **Roles** list, select one or more roles to assign to the user. + + Learn more about roles and how to create them in the [Getting started with RBAC]({{< relref "/nim/admin-guide/rbac/overview-rbac" >}}) topic. + +1. (Required for Basic Auth) Add each user's username and password to the `/etc/nms/nginx/.htpasswd` file on the NGINX Instance Manager server. You can choose to run a script or make the changes manually. Refer to the [Set user passwords]({{< relref "/nim/admin-guide/authentication/basic-auth/set-up-basic-authentication.md#set-basic-passwords" >}}) topic for instructions. 
+ +{{< see-also >}} Refer to the [Provision users and groups with SCIM]({{< relref "/nim/admin-guide/authentication/oidc/scim-provisioning.md" >}}) topic for instructions on automating user and group creation using the SCIM API. {{< /see-also >}} + + +## Set user passwords {#set-basic-passwords} + +{{< before-you-begin >}} +Before you can set users' passwords, ensure you have [created users](#create-users) in NGINX Instance Manager. Once you've created the users, you can use one of the following options to set their passwords. +{{< /before-you-begin >}} + +### (Recommended) Use the provided script {#set-basic-passwords-script} + +You can use the `basic_passwords.sh` script to add a user's encrypted password to the `/etc/nms/nginx/.htpasswd` file on the NGINX Instance Manager host. + +{{}}The `basic_passwords.sh` script requires the [OpenSSL](https://www.openssl.org) package. We strongly recommend **OpenSSL v1.1.1 or later**.{{}} + +To change a user's password with the `basic_passwords.sh` script: + +1. Open an SSH connection to your NGINX Instance Manager host and log in. +2. Run the `basic_passwords.sh` script, providing the username you want to update and the desired password. Be sure to enclose the password in single quotation marks. + + ```bash + sudo bash /etc/nms/scripts/basic_passwords.sh <username> '<password>' + ``` + + For example: + + ```bash + sudo bash /etc/nms/scripts/basic_passwords.sh johndoe 'jelly22fi$h' + ``` + +### Manually set user passwords {#manually-set-basic-passwords} + +To manually set user passwords: + +1. Open the `/etc/nms/nginx/.htpasswd` file on the NGINX Instance Manager host and add the username and password for each user. +2. Save the changes to the file. 
+ +{{< see-also >}}Refer to the documentation [Restricting access with HTTP basic auth]({{< relref "/nginx/admin-guide/security-controls/configuring-http-basic-authentication.md" >}}) for detailed instructions on working with the password file.{{< /see-also >}} + +## Making API requests with basic authentication + +{{< include "nim/admin-guide/auth/basic-auth-api-requests.md" >}} + +## Ending your browser session + +With basic authentication, NGINX Instance Manager does not have a "Log Out" button. To end your session, close the web browser you're using. + +Closing the browser will void the authentication token or session cookie tied to your account. This step is essential for securing your account and preventing unauthorized access to NGINX Instance Manager. diff --git a/content/nim/admin-guide/authentication/oidc/_index.md b/content/nim/admin-guide/authentication/oidc/_index.md new file mode 100644 index 000000000..216e73156 --- /dev/null +++ b/content/nim/admin-guide/authentication/oidc/_index.md @@ -0,0 +1,5 @@ +--- +title: OIDC +weight: 5000 +url: /nginx-instance-manager/admin-guide/authentication/oidc/ +--- diff --git a/content/nim/admin-guide/authentication/oidc/getting-started.md b/content/nim/admin-guide/authentication/oidc/getting-started.md new file mode 100644 index 000000000..e5ebb1fd8 --- /dev/null +++ b/content/nim/admin-guide/authentication/oidc/getting-started.md @@ -0,0 +1,105 @@ +--- +description: '' +docs: DOCS-1267 +doctypes: +- tutorial +tags: +- docs +title: Get started with OIDC +toc: true +weight: 1 +--- + +## Overview + +We recommend using OpenID Connect (OIDC) as the preferred authentication method for NGINX Instance Manager. OIDC offers several advantages, including Single Sign-On (SSO) for users and simplified user management for administrators through user groups. OIDC also enables easy scalability and streamlined user access management. 
+ +NGINX Instance Manager’s implementation of OIDC is designed to work with any Identity Provider (IdP) that supports the OIDC protocol. The instructions below are general and can be applied to any IdP. + +{{}}To learn how to configure OIDC with a specific identity provider, refer to the linked topics in the [Set up specific IdPs for OIDC](#oidc-specific-idps) section at the bottom of this page.{{}} + +## Create roles and user groups in NGINX Instance Manager {#configure-nim} + +When using OIDC for authentication, administrators don't need to create and manage users in NGINX Instance Manager. Instead, they create user groups in NGINX Instance Manager that match groups in their IdP. The roles assigned to the user group set the access level and permissions for users based on their group membership. Users who aren't in a group with an assigned role won't have access to NGINX Instance Manager. + +To grant users access using OIDC, follow these steps: + +1. Create a role in NGINX Instance Manager. +2. Create a user group and assign a role to it. **Important**: The group name must exactly match a group name in your IdP. +3. Set up OIDC. + +### Create a role {#create-role} + +{{< include "nim/rbac/create-roles.md" >}} + +#### Next steps + +After creating a role, assign it to a user group within NGINX Instance Manager that matches a group in your IdP. Proceed to the [create a user group with an assigned role](#create-user-group) section for detailed instructions. + +### Create a user group with an assigned role {#create-user-group} + +{{< include "nim/rbac/create-user-groups.md" >}} + +#### Next steps + +Now that you've created a user group and assigned a role in NGINX Instance Manager, continue to the [configure OIDC](#configure-oidc) section. These instructions will help you integrate with your IdP and ensure user groups and permissions work as expected. 
+ +## Configure OIDC {#configure-oidc} + +### Before you begin + +{{}} +Before switching from basic authentication to OIDC, make sure to add at least one admin user to your IdP. Failure to do so can result in admin users being locked out of NGINX Instance Manager. If this occurs, you can restore access by reverting back to basic authentication. +{{}} + +When you configure OIDC for NGINX Instance Manager, basic authentication will be disabled for all users, including the default `admin` user. To ensure uninterrupted access, create a user group in NGINX Instance Manager that corresponds to a group in your IdP and assign the appropriate roles. + +- Follow the instructions above to [grant users access](#configure-nim) before proceeding. + +### Requirements + +The following requirements must be met before you can use OIDC with NGINX Instance Manager: + +1. [Install Instance Manager]({{< relref "/nim/deploy/vm-bare-metal/install.md" >}}) on a server that also has [NGINX Plus R21 or newer]({{< relref "/nginx/admin-guide/installing-nginx/installing-nginx-plus.md" >}}) installed. Ensure the server hosting NGINX Plus has a fully qualified domain name (FQDN). + +2. [Install the NGINX JavaScript module (njs)](https://www.nginx.com/blog/introduction-nginscript/) on the same server as Instance Manager. This module is required for managing communications between NGINX Plus and the identity provider. + +3. Configure an IdP to provide authentication services. This includes setting up authentication policies, scopes, and client credentials within your IdP. + +### Enable OIDC + +To enable OIDC, follow these steps to update the OIDC configuration file: + +1. Open `/etc/nms/nginx/oidc/openid_configuration.conf` in a text editor and replace the default placeholder values with the relevant information for your IdP. (For more information on the variables, refer to the [OIDC configuration values](#oidc-variables).) Save the changes. + +1. 
Open `/etc/nginx/conf.d/nms-http.conf` in a text editor and uncomment the OIDC settings that begin with `#OIDC`. Comment out the basic authentication settings. Save the changes. + +1. Run `sudo nginx -t` to validate the configuration and ensure there are no errors. + +1. Reload NGINX and apply the new configuration with `sudo nginx -s reload`. + +### OIDC configuration values {#oidc-variables} + +The sections below provide detailed descriptions of the OIDC configuration values. + +#### Metadata from well-known endpoints + +- **$oidc_authz_endpoint**: The URL of the IdP’s OAuth 2.0 Authorization endpoint. +- **$oidc_jwt_keyfile**: The URL of the IdP’s JSON Web Key Set (JWKS) document. +- **$oidc_logout_endpoint**: The URL of the IdP’s end_session endpoint. +- **$oidc_token_endpoint**: The URL of the IdP’s OAuth 2.0 Token endpoint. +- **$oidc_userinfo_endpoint**: The URL of the IdP’s UserInfo endpoint. +- **$oidc_host**: The URL of the IdP’s application (e.g., `https://{my-app}.okta.com`). +- **$oidc_scopes**: List of OAuth 2.0 scope values supported by the server (e.g., `openid+profile+email+offline_access`). + +#### Custom configuration for well-known endpoints + +For custom settings, adjust parameters such as `$oidc_authz_path_params_enable`, `$oidc_logout_query_params`, and others to match your IdP’s needs. 
+ + +## Set up specific IdPs for OIDC {#oidc-specific-idps} + +For specific IdP setup instructions, refer to the following: + +- [Set up Microsoft Entra as an OIDC identity provider]({{< relref "/nim/admin-guide/authentication/oidc/microsoft-entra-setup.md" >}}) +- [Set up Keycloak as an OIDC identity provider]({{< relref "/nim/admin-guide/authentication/oidc/keycloak-setup.md" >}}) diff --git a/content/nim/admin-guide/authentication/oidc/keycloak-setup.md b/content/nim/admin-guide/authentication/oidc/keycloak-setup.md new file mode 100644 index 000000000..cfc661138 --- /dev/null +++ b/content/nim/admin-guide/authentication/oidc/keycloak-setup.md @@ -0,0 +1,250 @@ +--- +docs: DOCS-1268 +doctypes: +- tutorial +tags: +- docs +title: "Keycloak: Set up OIDC authentication" +toc: true +weight: 400 +--- + +## Overview + +This guide explains how to configure Keycloak as an identity provider (IdP) for F5 NGINX Instance Manager. By implementing OIDC for authentication, administrators can simplify user management in NGINX Instance Manager. Instead of creating and managing users individually, administrators can create user groups in NGINX Instance Manager that align with groups in their Identity Provider. Access and permissions for users are determined by the roles assigned to their respective user groups. Users from the Identity Provider who are not part of a group with an assigned role will not have access to NGINX Instance Manager. + +We strongly recommend OpenID Connect (OIDC) as the preferred authentication method for NGINX Instance Manager. OIDC brings several benefits, including Single Sign-On (SSO) and simplified user management through user groups. + +## Requirements + +To follow the instructions in this guide, ensure you have the following setup for your Keycloak server and NGINX Instance Manager host: + +### Keycloak Server + +- Set up a Keycloak server. 
Refer to the Keycloak [Getting Started](https://www.keycloak.org/guides#getting-started) and [Server](https://www.keycloak.org/guides#server) documentation for setup instructions. You will need to [create a Realm](https://www.keycloak.org/docs/latest/server_admin/#configuring-realms) with an OpenID Endpoint Configuration enabled. + +### NGINX Instance Manager + +On the NGINX Instance Manager host, complete the following: + +- [Install NGINX Plus R25 or a later version]({{< relref "/nginx/admin-guide/installing-nginx/installing-nginx-plus.md" >}}). Ensure the server hosting NGINX Plus has a fully qualified domain name (FQDN). +- [Install NGINX Instance Manager]({{< relref "/nim/deploy/vm-bare-metal/install.md" >}}). +- [Install the NGINX JavaScript module (njs)](https://www.nginx.com/blog/introduction-nginscript/). This module is needed for managing communications between NGINX Plus and the identity provider. + +## Configure Keycloak {#configure-keycloak} + +### Create Keycloak Client + +Follow these steps to configure Keycloak. + +1. Log in to Keycloak as an administrator. +2. In the navigation menu, select **Clients**. +3. Select **Create**. +4. On the **Add Client** form, in the **Client ID** box, type `nim` as the name for the client. +5. In the **Client Protocol** list, select **openid-connect**. +6. Set the **Root URL** to the URL of the NGINX Instance Manager instance, for example, `https://:443/_codexch`. +7. Select **Save**. + +After the client is created, configure it as follows: + +1. On the **Settings** tab, in the **Access Type** list, select **confidential**. +2. On the **Mappers** tab, select **Add Builtin**, and select **groups**. This exports the user's Keycloak Realm Role information for NGINX Instance Manager to use. + +### Create Keycloak Roles + +NGINX Instance Manager User Groups will map to Keycloak **Realm Roles**; Keycloak Client Roles are **not** mapped. Use Keycloak top-level roles (Realm Roles). + +1. 
In the navigation menu, select **Realm Roles** (or select **Roles** and then the **Realm Roles** tab if using an older version of Keycloak). +2. Select **Create Role**. +3. In the **Role Name** box, type the name of the first group you created in NGINX Instance Manager, for example, `nim-admins`. +4. Select **Save**. +5. Repeat steps 1–3 for all the groups you want to provide access to, for example, `nim-users` and `nim-nap-users`. + +### Create Keycloak Users + +Create the users that will be allowed to log in to NGINX Instance Manager. + +1. In the navigation bar, select **Users**. +2. Select **Add User**. +3. In the **Username** box, type the user's name. +4. In the **Email** box, type the user's email address. NGINX Instance Manager will use this email address as the user's identifier when setting its headers. +5. Select **Save**. +6. After creating the user, go to the **Credentials** tab. +7. Provide a **Password**, confirm it, and select **Set Password**. +8. On the **Role Mappings** tab, select the roles you want to assign, such as `nim-admins`, `nim-users`, or `nim-nap-users`. +9. Select **Add selected**. + +## Configure NGINX Instance Manager {#create-roles-user-groups} + +### Create Roles in NGINX Instance Manager + +{{< include "nim/rbac/create-roles.md" >}} + +### Create User Groups in NGINX Instance Manager + +{{< include "nim/rbac/create-user-groups.md" >}} + +## Configure NGINX Instance Manager to use Keycloak {#configure-nim} + +{{}} +Some file names in this guide, such as `nms-http.conf` and directories like `/etc/nms/nginx/`, still use the `nms` naming convention. This is for backward compatibility and does not affect the functionality of NGINX Instance Manager. +{{}} + +To configure NGINX Instance Manager to use Keycloak as the OIDC identity provider, follow these steps. + +### Set Keycloak Secret as an Environment Variable + +Set the Keycloak secret as an environment variable on the NGINX Instance Manager host. 
+ +{{< call-out "important" "Security consideration" >}}When setting a client secret as an environment variable, ensure that the environment has strict access controls. Only authorized users or processes should be able to view or modify the environment variables. Consider encrypting the value and regularly rotating the client secret.{{}} + +To copy the Keycloak secret: + +1. Open the Keycloak user interface. +2. Select the **Clients** tab, then select the **nim** client. +3. On the **Credentials** tab, copy the **Secret** value. + +To set the Keycloak secret as an environment variable: + +1. Open an SSH connection to your NGINX Instance Manager host and log in. +2. Run the following command, replacing `` with the secret value you copied: + + ```bash + export KEYCLOAK_SECRET= + ``` + +### Configure OIDC Settings + +To configure NGINX Instance Manager with the necessary OIDC settings, follow these steps: + +- Export the environment variables: + + ```bash + # Either the FQDN or the IP address is suitable for these environment variables. 
+ export KEYCLOAK_IP="" + export NIM_IP="" + export KEYCLOAK_CLIENT_ID="" + export KEYCLOAK_CLIENT_SECRET="" + + # Choose an appropriate Hash-Based Message Authentication Code (HMAC) + export HMAC_KEY="" + + export KEYCLOAK_AUTH_ENDPOINT=$(curl -k "https://$KEYCLOAK_IP:8443/auth/realms//.well-known/openid-configuration" | jq -r ".authorization_endpoint") + export KEYCLOAK_TOKEN_ENDPOINT=$(curl -k "https://$KEYCLOAK_IP:8443/auth/realms//.well-known/openid-configuration" | jq -r ".token_endpoint") + export KEYCLOAK_KEYS_ENDPOINT=$(curl -k "https://$KEYCLOAK_IP:8443/auth/realms//.well-known/openid-configuration" | jq -r ".jwks_uri") + ``` + +- Back up the original configuration files: + + ```bash + sudo cp /etc/nms/nginx/oidc/openid_configuration.conf ~/openid_configuration.conf.orig + sudo cp /etc/nginx/conf.d/nms-http.conf ~/nms-http.conf.orig + ``` + +- Copy the OpenID configuration for NGINX to `/tmp` so you can replace the necessary values: + + ```bash + sudo cp /etc/nms/nginx/oidc/openid_configuration.conf /tmp/openid_configuration.conf + sudo sed -i'.bak' \ + -e "s%OIDC_CLIENT_ID%${KEYCLOAK_CLIENT_ID}%" \ + -e "s%SERVER_FQDN%${NIM_IP}%" \ + -e "s%OIDC_AUTH_ENDPOINT%${KEYCLOAK_AUTH_ENDPOINT}%" \ + -e "s%OIDC_TOKEN_ENDPOINT%${KEYCLOAK_TOKEN_ENDPOINT}%" \ + -e "s%OIDC_KEYS_ENDPOINT%${KEYCLOAK_KEYS_ENDPOINT}%" \ + -e "s%OIDC_CLIENT_SECRET%${KEYCLOAK_CLIENT_SECRET}%" \ + -e "s%OIDC_HMAC_KEY%${HMAC_KEY}%" \ + /tmp/openid_configuration.conf + ``` + +- Uncomment the relevant Keycloak sections in `/tmp/openid_configuration.conf`: + + ```yaml + # Enable when using OIDC with keycloak + map $http_authorization $groups_claim { + default $jwt_claim_groups; + } + + + map $http_authorization $user_email { + "~^Bearer.*" '$jwt_clientId@$oidc_domain'; + default $jwt_claim_email; + } + ``` + +- Copy the `nms-http.conf` file to `/tmp` to replace the necessary values: + + ```bash + sudo cp /etc/nginx/conf.d/nms-http.conf /tmp/nms-http.conf + ``` + +- Uncomment the OIDC sections in 
`nms-http.conf`: + + ```yaml + # Enable when using OIDC + log_format oidc_jwt '$remote_addr - $jwt_claim_sub [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" "$http_user_agent" ' + '"$http_x_forwarded_for"'; + ``` + + ```yaml + # OIDC -- client configuration uncomment include to enable + include /etc/nms/nginx/oidc/openid_configuration.conf; + ``` + + ```yaml + ## For OIDC Authentication: authorization code flow and Relying Party processing + # OIDC - remove comment from following directives to enable + add_header Nginx-Management-Suite-Auth "OIDC"; + include /etc/nms/nginx/oidc/openid_connect.conf; + ``` + + ```yaml + # OIDC: use email as a unique identifier + # NOTE: the username is dependent upon claims provided by your IdP + proxy_set_header Nginx-Management-Suite-Auth "OIDC"; + proxy_set_header Nginx-Management-Suite-User $user_email; + proxy_set_header Nginx-Management-Suite-Groups $groups_claim; + proxy_set_header Nginx-Management-Suite-ExternalId $jwt_claim_sub; + ``` + + Also uncomment all sections starting with `# OIDC authentication (uncomment to enable)`. + +- Comment out all the Basic Auth sections in `nms-http.conf`: + + ```yaml + ## For use with basic auth + #auth_basic_user_file /etc/nms/nginx/.htpasswd; + ## auth type indication to the client + #add_header Nginx-Management-Suite-Auth "Basic"; + ``` + + ```yaml + # HTTP Basic: + #proxy_set_header Nginx-Management-Suite-User $remote_user; + #proxy_set_header Nginx-Management-Suite-Groups ""; + #proxy_set_header Nginx-Management-Suite-ExternalId ""; + ``` + +- Copy the modified configuration files back to their original locations: + + ```bash + sudo cp /tmp/nms-http.conf /etc/nginx/conf.d/nms-http.conf + sudo cp /tmp/openid_configuration.conf /etc/nms/nginx/oidc/openid_configuration.conf + ``` + +- Run `sudo nginx -t` to verify the config has no errors. +- Reload NGINX by running `sudo nginx -s reload`. 
+ +## Troubleshooting + +To revert to Basic Auth for troubleshooting authentication issues, run: + +```bash +sudo cp ~/openid_configuration.conf.orig /etc/nms/nginx/oidc/openid_configuration.conf +sudo cp ~/nms-http.conf.orig /etc/nginx/conf.d/nms-http.conf +sudo nginx -s reload +``` + +## Try It Out + +Open NGINX Instance Manager by going to `https:///ui`. You will be redirected to the Keycloak login page. Log in with the credentials you created in Keycloak. diff --git a/content/nim/admin-guide/authentication/oidc/microsoft-entra-automation.md b/content/nim/admin-guide/authentication/oidc/microsoft-entra-automation.md new file mode 100644 index 000000000..569e5f05f --- /dev/null +++ b/content/nim/admin-guide/authentication/oidc/microsoft-entra-automation.md @@ -0,0 +1,169 @@ +--- +docs: DOCS-1197 +doctypes: +- tutorial +tags: +- docs +title: Automate OIDC with Microsoft Entra +toc: true +weight: 300 +--- + +## Overview + +This guide explains how to secure NGINX Instance Manager with OpenID Connect (OIDC) using the client credentials flow and Microsoft Entra as the identity provider. You can use this setup for automation services, such as in CI/CD pipelines. + +## Before you begin + +{{}} +Before proceeding, first secure NGINX Instance Manager with OpenID Connect (OIDC) using Microsoft Entra as the identity provider. Complete the steps in the [Set up OIDC authentication with Microsoft Entra]({{< relref "/nim/admin-guide/authentication/oidc/microsoft-entra-setup.md" >}}) guide. Afterward, you'll have a registered application (e.g., "NGINX Instance Manager") in Microsoft Entra, as well as a client ID and secret to configure automation. +{{}} + +## Configure Azure + +### Register a new application for your automation + +1. Log in to the [Azure portal](https://portal.azure.com/#home). +2. Select **Microsoft Entra** from the list of Azure services. +3. In the left navigation menu, under **Manage**, select **App registrations**. +4. Select **New registration**. +5. 
Complete the following: + - In the **Name** field, enter the name of the application (e.g., "Automation"). + - Select **Accounts in this organizational directory only** for account types. + - Leave **Redirect URI** blank. +6. Select **Register**. +7. On the confirmation page, note the following information for later use: + - **Application (client) ID** + - **Directory (tenant) ID** + +### Expose the application API + +1. In the left navigation menu, under **Manage**, select **Expose an API**. +2. Select **Set** next to **Application ID URI**. +3. Optionally, enter a friendly URI for the application, or leave the default value. +4. Select **Save**. + +### Create an app role {#create-app-role} + +1. In the left navigation menu, under **Manage**, select **App roles**. +2. Select **Create app role**. +3. Fill in the role details. Use the information from an existing user group in NGINX Instance Manager, such as from the [Create user groups in Instance Manager]({{< relref "/nim/admin-guide/authentication/oidc/microsoft-entra-setup.md#create-user-groups-in-nginx-instance-manager" >}}) step: + - In the **Display name** field, enter a role name (e.g., "Admin"). + - In **Allowed member types**, select **Applications**. + - In the **Value** field, enter the value for the role. This must match the user group in NGINX Management Suite. + - Provide a description for the role. +4. Select **Save**. + +### Assign the app role to the application + +1. On the **App registrations** page, select the first application you created (e.g., "Instance Manager"). +2. In the left navigation menu, under **Manage**, select **API permissions**. +3. Select **Add a permission**. +4. In the **Request API permissions** section, select **My APIs**. +5. Select the app name you created for automation (e.g., "Automation"). +6. Under **Application permissions**, select the role you created earlier (e.g., "Admin"). +7. Select **Add permissions**. 
+ +{{< note >}}If the permission is not granted, contact your Microsoft Entra administrator to approve it.{{< /note >}} + +## Configure NGINX OIDC to use Microsoft Entra as the IdP + +Complete the steps in the [Configure NGINX Plus with Microsoft Entra as Identity Provider]({{< relref "/nim/admin-guide/authentication/oidc/microsoft-entra-setup.md#configure-nginx-plus" >}}) topic. Note that you may have already completed some of these steps in the [Before you begin](#before-you-begin) section of this guide. + +Additionally, complete the following steps: + +1. Add your Microsoft Entra tenant domain to the `/etc/nms/nginx/oidc/openid_configuration.conf` file. For example, if your tenant domain is `f5.com`, update the `oidc_domain` setting like this: + + ```nginx + ... + map $host $oidc_domain { + SERVER_FQDN OIDC_DOMAIN; + # replace with OIDC specific setting + default "f5.com"; + } + ... + ``` + +2. Uncomment the relevant OIDC sections in the `/etc/nginx/conf.d/nms-http.conf` file: + + ```nginx + ... + # Enable when using OIDC with Microsoft Entra + map $http_authorization $groups_claim { + "~^Bearer.*" $jwt_claim_roles; + default $jwt_claim_groups; + } + + map $jwt_audience $jwt_aud_client { + default $jwt_audience; + ~^api://(.+)$ $1; + } + + map $http_authorization $user_email { + "~^Bearer.*" '$jwt_aud_client@$oidc_domain'; + default $jwt_claim_email; + } + ... + ``` + +## Get an access token from Microsoft Entra + +1. Send a `POST` request to the Microsoft Entra token endpoint: + + ```bash + https://login.microsoftonline.com//oauth2/v2.0/token + ``` + +2. Include the following in your request body: + - `client_id`: The client ID of the application you created. + - `client_secret`: The client secret for the application. + - `scope`: The application scope (e.g., `api:///.default`). + - `grant_type`: Use `client_credentials`. + +3. The response will contain an access token. 
Decoding the token should give you a result similar to: + + **Header:** + + ```json + { + "alg": "RS256", + "x5t": "-KI3Q9nNR7bR0fxmeZ0XqbHZGew", + "kid": "-KI3Q9nNR7bR0fxmeZ0XqbHZGew" + } + ``` + + **Payload:** + + ```json + { + "aud": "api://f834b49c-a56e-4fde-9caa-641-bOc26fb8a", + "iss": "https://sts.windows.net/d3dfd2f-6a3b-40d1-9beO-b8f327d81c50/", + "iat": 1593640000, + "nbf": 1593640000, + "exp": 1593643600, + "aio": "42+E2ZYHBXei7VKmxxHzn7h1", + "appid": "374cc05e-aaa1-408f-9348-a83d6b4d8ea6", + "appidacr": "1", + "idp": "https://sts.windows.net/d3dfd2f-6a3b-40d1-9beO-b8f327d81c5/", + "oid": "2db3db56-f58b-455a-9ff5-4e1e8b17a171", + "rh": "0.AQABA_893Ttq0UCb4L0QwQ-DJ9QgcILmngha-4Q", + "roles": [ + "28a3143e-4217-485e-9fOf-092abc01239b01" + ], + "sub": "2db3db56-f58b-455a-9ff5-4e1e8b17a1a71", + "tid": "dd3dfd2f-6a3b-40d1-9bee-bfaqw27d81c5e", + "uti": "EmqiFiTC-kACZqN5vrKd8AQ" , + } + ``` + + {{< note >}}The `roles` claim will contain the role ID of the role you created in the [Create an app role](#create-app-role) step.{{< /note >}} + +## Access NGINX Management Suite API using the access token + +To access the NGINX Management Suite API using the access token, send the token in the `Authorization` header of the request as a Bearer token. For example, using `curl`: + +```bash +curl -v -k --header "Authorization: Bearer " https:///api/platform/v1/userinfo +``` + +Replace `` with the token you obtained from Microsoft Entra and `` with the IP address of your NGINX Management Suite instance. 
diff --git a/content/nim/admin-guide/authentication/oidc/microsoft-entra-setup.md b/content/nim/admin-guide/authentication/oidc/microsoft-entra-setup.md new file mode 100644 index 000000000..9438ea7e2 --- /dev/null +++ b/content/nim/admin-guide/authentication/oidc/microsoft-entra-setup.md @@ -0,0 +1,216 @@ +--- +docs: DOCS-795 +doctypes: +- tutorial +tags: +- docs +title: "Microsoft Entra: Set up OIDC authentication" +toc: true +weight: 100 +--- + + +## Overview + +This guide explains how to configure Microsoft Entra (AD) as an identity provider (IdP) for F5 NGINX Instance Manager. By implementing OIDC for authentication, administrators can simplify user management in NGINX Instance Manager. Instead of creating and managing users individually, administrators can create user groups in NGINX Instance Manager that align with groups in their Identity Provider. Access and permissions for users are determined by the roles assigned to their respective user groups. Users from the Identity Provider who are not part of a group with an assigned role will not have access to NGINX Instance Manager. + +We strongly recommend Open ID Connect (OIDC) as the preferred authentication method for the NGINX Instance Manager. OIDC brings several benefits, including Single Sign-On (SSO) and simplified user management through user groups. + +To configure Microsoft Entra as an OIDC IdP, follow these steps: + +**Configure Microsoft Entra:** + +1. Create an Application Registration for NGINX Instance Manager. +2. Add owners (users) and their email addresses to Microsoft Entra. +3. Create groups in Microsoft Entra and assign user membership. + +**Configure NGINX Instance Manager:** + +1. Add user groups to NGINX Instance Manager, using the same group names as in Microsoft Entra. +2. Configure NGINX Plus in NGINX Instance Manager to use Microsoft Entra as the designated identity provider. 
+ +## Requirements + +To successfully follow the instructions in this guide, you must complete the following requirements: + +1. Create a [Microsoft Entra premium account](https://azure.microsoft.com/en-us/pricing/details/active-directory/). If you have a standard account, you'll need to upgrade. +2. [Install Instance Manager]({{< relref "/nim/deploy/vm-bare-metal/install.md" >}}) on a server that also has [NGINX Plus R25 or a newer version installed]({{< relref "/nginx/admin-guide/installing-nginx/installing-nginx-plus.md" >}}). Make sure the server hosting NGINX Plus has a fully qualified domain name (FQDN). +3. [Install the NGINX JavaScript module (njs)](https://www.nginx.com/blog/introduction-nginscript/) on the same server as Instance Manager. This module is necessary for managing communications between NGINX Plus and the identity provider. + +## Configure Microsoft Entra {#configur-entra} + +Complete the steps in the section to configure Microsoft Entra for use with NGINX Instance Manager. + +### Register Application {#az-ad-register-app} + +To register an application with Microsoft Entra: + +1. Go to the [Azure portal](https://portal.azure.com/#home) and log in. +2. Select **Microsoft Entra** from the list of Azure services. +3. On the left navigation menu, under the **Manage** section, select **App registrations**. +4. Select **New registration**. +5. Provide the following details: + - Enter a name for the application in the **Name** field, such as "NGINX Instance Manager". + - Select **Account in this organizational directory only** from the list of account types. + - Under the **Redirect URI** section, choose **Web** and enter the redirect URI, for example, `https:///_codexch`. + + {{< img src="/security/oidc/azure-register-app.png" alt="Azure: register an application." width="600" height="415" >}} + +6. Select **Register**. +7. On the confirmation page, make a note of the following information. 
You'll need to provide this information later to complete the setup: + - Application (client) ID + - Directory (tenant) ID + +### Create Client Secret {#az-ad-client-secret} + +{{< important >}}Make sure to save the value of the client secret in a secure location for future reference. Once you navigate away from the page, the value cannot be retrieved again.{{< /important >}} + +To create a client secret: + +1. On the left navigation menu, under the **Manage** section, select **Certificates & secrets**. +2. Select **New client secret**. +3. In the **Description** box, type a description for the client secret. +4. Select **Add**. The client secret will be added to the list with a unique secret string value and ID. +5. Copy the value for the client secret. + +### Add Owners {#az-ad-owners} + +{{< important >}}Make sure to add at least one user with administrative privileges. Failure to do so may lock admin users out of NGINX Instance Manager. If that happens, revert to Basic Auth to restore access.{{< /important >}} + +To add owners (users): + +1. On the left navigation menu, under the **Manage** section, select **Owners**. +2. Select **Add owners**. +3. Search for the user you want to add, then select **Select**. Repeat this step for each user you want to add. + +### Add Group Claim to Token {#az-ad-group-claim} + +{{< note >}}The only supported group claim format for groups created in Microsoft Entra is **Microsoft Entra group ObjectId**.{{< /note >}} + +To include the user's group membership information in the token for authentication and authorization, follow these steps: + +1. On the left navigation menu, under the **Manage** section, select **Token configuration**. +2. Select **Add groups claim**. +3. Select **Groups assigned to the application**. +4. Select **Add**. + +### Assign Group to Application {#az-ad-group} + +{{< note >}}By default, tokens expire after 60 minutes. 
You can find instructions on configuring token expiration in the Microsoft Entra topic [Configurable token lifetime properties](https://learn.microsoft.com/en-us/azure/active-directory/develop/Active-directory-configurable-token-lifetimes#configurable-token-lifetime-properties).{{< /note >}} + +Adding a group to the registered application will give all group members the same access. + +1. On the left navigation menu, under the **Manage** section, select **Overview**. +2. In the **Essentials** section, select the link next to **Managed application in local directory**. +3. In the **Getting Started** section, select **Assign users and groups**. +4. Select **Add user/group**. +5. On the **Add Assignment** form, under the **Users and groups** section, select **None Selected**. +6. In the search box in the **Users and groups** drawer, type the name of the group you want to associate with the application. +7. Select the group from the list, and select **Select**. +8. Finally, select **Assign**. + +## Configure NGINX Instance Manager {#configure-nginx-instance-manager} + +### Create Roles in NGINX Instance Manager + +{{< include "nim/rbac/create-roles.md" >}} + +### Create User Groups in NGINX Instance Manager + +{{< include "nim/rbac/create-user-groups.md" >}} + +### Configure NGINX Plus with Microsoft Entra as Identity Provider {#configure-nginx-plus} + +Configure NGINX Plus to use Microsoft Entra as the identity provider. + +1. Install the NGINX JavaScript module (njs) on your NGINX Instance Manager server by running the appropriate command. This module is required for handling the interaction between NGINX Plus and Microsoft Entra (IdP). + + - CentOS, RHEL: + ```bash + sudo yum install nginx-plus-module-njs + ``` + + - Debian, Ubuntu: + ```bash + sudo apt install nginx-plus-module-njs + ``` + +2. 
Open the `/etc/nginx/nginx.conf` file in a text editor and add the following directive to the top-level ("main") section to load the NGINX JavaScript module: + ```nginx + load_module modules/ngx_http_js_module.so; + ``` + +3. Open the `/etc/nms/nginx/oidc/openid_configuration.conf` file in a text editor. Replace the following variables in the file with the values you saved when [configuring Microsoft Entra](#configure-entra). Save the changes: + - `{client_key}`: Replace with the **Application (client) ID** obtained when [registering the application](#az-ad-register-app). + - `{tenant_key}`: Replace with the **Directory (tenant) ID** obtained when [registering the application](#az-ad-register-app). + - `{client_secret}`: Replace with the encoded client secret that was generated when [creating the client secret](#az-ad-client-secret). + +
    + Example openid_configuration.conf + + ```yaml + # NGINX Instance Manager - OpenID Connect configuration + # Created for v. 2.0 + # (c) NGINX, Inc. 2021 + + # Enable when using OIDC with Microsoft Entra + map $http_authorization $groups_claim { + "~^Bearer.*" $jwt_claim_roles; + default $jwt_claim_groups; + } + map $jwt_audience $jwt_aud_client { + default $jwt_audience; + ~^api://(.+)$ $1; + } + map $http_authorization $user_email { + "~^Bearer.*" '$jwt_aud_client@$oidc_domain'; + default $jwt_claim_email; + } + map $host $oidc_authz_endpoint { + default "https://login.microsoftonline.com/{tenant_key}/oauth2/v2.0/authorize"; + } + map $host $oidc_token_endpoint { + default "https://login.microsoftonline.com/{tenant_key}/oauth2/v2.0/token"; + } + map $host $oidc_jwt_keyfile { + default "https://login.microsoftonline.com/{tenant_key}/discovery/v2.0/keys"; + } + ``` + +
    + +4. Using a text editor, open the `/etc/nginx/conf.d/nms-http.conf` configuration file and uncomment the OIDC settings starting with `#OIDC`. Comment out the Basic Authentication settings. Save the changes. + +
    + Example nms-http.conf + + ```yaml + # NGINX Instance Manager - Instance Manager configuration + # Created for v. 2.0 + # (c) NGINX, Inc. 2021 + + # OIDC: use email as a unique identifier + proxy_set_header Nginx-Management-Suite-User $user_email; + proxy_set_header Nginx-Management-Suite-Groups $groups_claim; + proxy_set_header Nginx-Management-Suite-ExternalId $jwt_claim_sub; + + # OIDC authentication + include /etc/nms/nginx/oidc/openid_connect.conf; + ``` + +
    + +5. Verify that the configuration file does not contain any errors: + ```bash + sudo nginx -t + ``` + +6. Reload NGINX and apply the configuration: + ```bash + sudo nginx -s reload + ``` + +## Try It Out + +1. Open a web browser and go to the FQDN of your NGINX Instance Manager host. You will be redirected to the Microsoft Entra login page. +2. Enter your Microsoft Entra email address and password to log in. diff --git a/content/nim/admin-guide/authentication/oidc/scim-provisioning.md b/content/nim/admin-guide/authentication/oidc/scim-provisioning.md new file mode 100644 index 000000000..50d5570e6 --- /dev/null +++ b/content/nim/admin-guide/authentication/oidc/scim-provisioning.md @@ -0,0 +1,143 @@ +--- +docs: DOCS-918 +doctypes: +- task +tags: +- docs +title: Provision users and groups using SCIM +toc: true +weight: 500 +--- + +## Overview + +Starting with NGINX Instance Manager 2.3, you can use SCIM 2.0 (System for Cross-domain Identity Management) to provision, update, or deprovision users and user groups through an open API for managing identities. + +## Access SCIM APIs + +NGINX Instance Manager enforces RBAC for the SCIM APIs through the `USER-MANAGEMENT` feature. To have full access, users must be assigned to a role with `Create`, `Read`, `Update`, and `Delete` permissions. + +### SCIM Endpoints + +The SCIM endpoints listed below follow the specifications outlined in the [SCIM Endpoints and HTTP Methods](https://datatracker.ietf.org/doc/html/rfc7644#section-3.2). + +#### Available Endpoints + +- **`/api/scim/v2/Users` (POST):** Adds a new IDP user. +- **`/api/scim/v2/Users` (GET):** Retrieves all IDP users. Pagination parameters are supported according to the SCIM standard, and you can filter by `userName`. +- **`/api/scim/v2/Users/{userID}` (PUT):** Updates an existing IDP user. +- **`/api/scim/v2/Users/{userID}` (DELETE):** Deletes an existing IDP user. +- **`/api/scim/v2/Groups` (POST):** Adds a new user group. 
+- **`/api/scim/v2/Groups` (GET):** Retrieves all IDP groups. Pagination parameters are supported according to the SCIM standard. +- **`/api/scim/v2/Groups/{groupID}` (PUT):** Updates an existing user group. +- **`/api/scim/v2/Groups/{groupID}` (DELETE):** Deletes an existing user group. + +## Request and response schemas + +Requests and responses follow the schema format defined in [Resource Schema Representation](https://datatracker.ietf.org/doc/html/rfc7643#section-8.7.1). + +### Create user + +To create a user, send a **POST** request similar to the following example to the `/api/scim/v2/Users` endpoint: + +#### Request + +```json +{ + "schemas": [ + "urn:ietf:params:scim:schemas:core:2.0:User" + ], + "externalId": "idpuser@mydomain.ctrl", + "userName": "idpuser@mydomain.ctrl", + "name": { + "formatted": "Example IDP User", + "familyName": "Example", + "givenName": "SSO" + } +} +``` + +#### Response + +```json +{ + "emails": [ + { + "value": "idpuser@mydomain.ctrl" + } + ], + "externalId": "idpuser@mydomain.ctrl", + "id": "dc898740-4e9c-41a4-912c-1f3a20edf66e", + "meta": { + "created": "2022-06-17T21:03:37.138Z", + "lastModified": "2022-06-17T21:03:37.138Z", + "location": "/api/scim/v2/Users/dc898740-4e9c-41a4-912c-1f3a20edf66e", + "resourceType": "User" + }, + "name": { + "familyName": "Example", + "givenName": "SSO" + }, + "schemas": [ + "urn:ietf:params:scim:schemas:core:2.0:User" + ], + "userName": "idpuser@mydomain.ctrl" +} +``` + +
    + +### Create group + +To create a group, send a **POST** request similar to the following example to the `/api/scim/v2/Groups` endpoint: + +#### Request + +In this request, `User` is assigned as a member. + +```json +{ + "schemas": [ + "urn:ietf:params:scim:schemas:core:2.0:Group" + ], + "displayName": "Example Group", + "externalId": "7fcb12f4-af71-4f7d-a987-6c1a91cb838a", + "members": [ + { + "type": "User", + "value": "dc898740-4e9c-41a4-912c-1f3a20edf66e" + } + ] +} +``` + +#### Response + +```json +{ + "displayName": "Example Group", + "externalId": "7fcb12f4-af71-4f7d-a987-6c1a91cb838a", + "id": "e023964d-8a63-44f8-aa85-51ffa6aaa8f1", + "members": [ + { + "type": "User", + "value": "dc898740-4e9c-41a4-912c-1f3a20edf66e" + } + ], + "meta": { + "created": "2022-06-17T21:06:46.774Z", + "lastModified": "2022-06-17T21:06:46.774Z", + "location": "/api/scim/v2/Groups/e023964d-8a63-44f8-aa85-51ffa6aaa8f1", + "resourceType": "Group" + }, + "schemas": [ + "urn:ietf:params:scim:schemas:core:2.0:Group" + ] +} +``` + +{{< important >}}After creating a group with SCIM, you need to [assign roles to the group]({{< relref "/nim/admin-guide/rbac/assign-roles.md" >}}) in NGINX Instance Manager so the group has permissions associated with it.{{< /important >}} + +## Update users created with SCIM + +To update users created with SCIM, you need to use the SCIM API or update the users directly in your identity provider (IdP). You can't use the NGINX Instance Manager web interface to edit user details from an identity provider. 
diff --git a/content/nim/admin-guide/license/_index.md b/content/nim/admin-guide/license/_index.md new file mode 100644 index 000000000..bdaf856b0 --- /dev/null +++ b/content/nim/admin-guide/license/_index.md @@ -0,0 +1,7 @@ +--- +title: License and usage reporting +weight: 1 +url: /nginx-instance-manager/admin-guide/license/ +cascade: + type: "nim-r33" +--- \ No newline at end of file diff --git a/content/nim/admin-guide/license/add-license.md b/content/nim/admin-guide/license/add-license.md new file mode 100644 index 000000000..1460f32d4 --- /dev/null +++ b/content/nim/admin-guide/license/add-license.md @@ -0,0 +1,44 @@ +--- +docs: DOCS-789 +doctypes: task +title: Add license +toc: true +weight: 1 +--- + +## Overview + +To unlock all of the features in NGINX Instance Manager, you’ll need to add a JSON Web Token (JWT) license from MyF5. This guide shows you how to set up your network for reporting, download the license file, and apply it to NGINX Instance Manager. If needed, you can also cancel the license at any time. + +## Before you begin + +### Set up your network for entitlement and usage reporting + +NGINX Instance Manager can automatically report subscription entitlement and usage data to F5 if internet access is available. Ensure port `443` is open for these URLs: + +- https://product.apis.f5.com/ +- https://product-s.apis.f5.com/ee + + +## Download the license from MyF5 {#download-license} + +To download the JSON Web Token license from MyF5: + +{{< include "licensing-and-reporting/download-jwt-from-myf5.md" >}} + +## Add the license to NGINX Instance Manager {#apply-license} + +To add the license to NGINX Instance Manager: + +{{< include "nim/admin-guide/license/add-license-webui.md" >}} + +NGINX Instance Manager will connect to F5’s servers to retrieve your entitlements. Once completed, your entitlements and usage details appear on the **Licenses** page. 
+ +(Optional) To automatically report license entitlement and usage data to F5, select **Enable Continuous Connection**. Make sure your network is configured for reporting. + +## Canceling a license + +To cancel a license: + +1. Go to the **Licenses > Overview** page (`https://<NIM-FQDN>/ui/settings/license`). +2. Select **Terminate**, and confirm the action. diff --git a/content/nim/admin-guide/license/report-usage-connected-deployment.md b/content/nim/admin-guide/license/report-usage-connected-deployment.md new file mode 100644 index 000000000..35a56b420 --- /dev/null +++ b/content/nim/admin-guide/license/report-usage-connected-deployment.md @@ -0,0 +1,82 @@ +--- +title: "Report usage data to F5" +date: 2024-10-14T11:29:57-07:00 +# Change draft status to false to publish doc. +draft: false +# Description +# Add a short description (150 chars) for the doc. Include keywords for SEO. +# The description text appears in search results and at the top of the doc. +description: "" +# Assign weights in increments of 100 +weight: 10 +toc: true +tags: [ "docs" ] +# Create a new entry in the Jira DOCS Catalog and add the ticket ID (DOCS-) below +docs: "DOCS-1650" +# Taxonomies +# These are pre-populated with all available terms for your convenience. +# Remove all terms that do not apply. +categories: ["installation", "platform management", "load balancing", "api management", "service mesh", "security", "analytics"] +doctypes: ["task"] +journeys: ["researching", "getting started", "using", "renewing", "self service"] +personas: ["devops", "netops", "secops", "support"] +versions: [] +authors: [] + +--- + +## Overview + +In environments where NGINX Instance Manager has internet access but NGINX Plus doesn’t, NGINX Plus sends usage data to NGINX Instance Manager. NGINX Instance Manager will automatically send the usage reports to F5 for verification, or you can choose to send them manually. 
+ +**If usage reporting fails, NGINX Plus will stop processing traffic.** There's an exception for previously reported instances — refer to [handling outages](#handling-outages) for more details. + +See the steps below to configure NGINX Plus to report usage data to NGINX Instance Manager and how to submit the report to F5 for verification. + +{{}}If your deployment of NGINX Instance Manager doesn’t allow internet access, follow the steps in [Report usage data in network-restricted environments]({{< relref "nim/disconnected/report-usage-disconnected-deployment.md" >}}) to report usage data to F5.{{}} + +--- + +## Before you begin + +Before submitting usage data to F5, first ensure that the appropriate network ports are open for NGINX Instance Manager to report to F5, and then configure NGINX Plus to report telemetry data to NGINX Instance Manager. + +### Configure network ports for reporting usage + +To allow NGINX Instance Manager to report usage data to F5, make sure port `443` is open for these URLs: + +- `https://product.apis.f5.com/` +- `https://product-s.apis.f5.com/ee` + +### Configure NGINX Plus to report usage to NGINX Instance Manager + +To configure NGINX Plus (R33 and later) to report usage data to NGINX Instance Manager: + +{{< include "licensing-and-reporting/configure-nginx-plus-report-to-nim.md" >}} + +--- + +## Submit usage report to F5 + +### Automatic reporting + +When you [add your JSON Web Token (JWT)]({{< relref "nim/admin-guide/license/add-license.md" >}}) to NGINX Instance Manager, usage reporting is enabled by default. + +NGINX Instance Manager will automatically report subscription entitlement and usage data to F5 if internet access is available. + +### Manual reporting + +{{}}You need to report usage to F5 regularly. **If usage isn’t reported for 180 days, NGINX Plus will stop processing traffic**. 
For more details about the usage reporting process, see [About subscription licenses]({{< relref "solutions/about-subscription-licenses.md" >}}).{{}} + +If you prefer submitting usage reports to F5 manually, follow these steps: + +1. Log in to the NGINX Instance Manager web interface (`https://<NIM-FQDN>/ui/`). +2. Select the **Settings** (gear) icon. +3. On the **Licenses > Overview** page, turn off **Enable Continuous Connection**. +4. To manually submit a usage report, select **Send Usage to F5**. + +--- + +## What's reported + +{{< include "licensing-and-reporting/reported-usage-data.md" >}} diff --git a/content/nim/admin-guide/maintenance/_index.md b/content/nim/admin-guide/maintenance/_index.md new file mode 100644 index 000000000..7b6b7e753 --- /dev/null +++ b/content/nim/admin-guide/maintenance/_index.md @@ -0,0 +1,6 @@ +--- +title: Maintenance +weight: 400 +url: /nginx-instance-manager/admin-guide/maintenance/ +--- + diff --git a/content/nim/admin-guide/maintenance/backup-and-recovery.md b/content/nim/admin-guide/maintenance/backup-and-recovery.md new file mode 100644 index 000000000..ab800c22f --- /dev/null +++ b/content/nim/admin-guide/maintenance/backup-and-recovery.md @@ -0,0 +1,238 @@ +--- +docs: DOCS-1098 +tags: +- docs +title: Back up and restore +toc: true +weight: 1 +--- + +## Overview + +NGINX Instance Manager includes several scripts for backing up and restoring configuration files, secrets, and databases used by the platform. + +The backup and restore scripts are provided for reference and may need to be adjusted to suit your specific deployment. 
+ +{{< call-out "important" "NGINX Instance Manager 2.14.1 and earlier:" >}}If you're using **NGINX Instance Manager version 2.14.1 or earlier**, you'll need to [install SQLite]({{< relref "/nim/admin-guide/maintenance/sqlite-installation.md" >}}) to run the backup and recovery scripts.{{}} + +{{}} Some commands and directories still use `nms` in their paths or names because they were established when NGINX Instance Manager was part of the NGINX Management Suite. These names remain unchanged in this version for consistency with the existing file structure.{{}} + +--- + +## NGINX Instance Manager deployed on a Virtual Machine or Bare Metal + +### Before you begin + +To follow the instructions in this guide, make sure you have the following: + +- An installed version of NGINX Instance Manager +- NGINX Instance Manager services must be running: + + ```shell + sudo systemctl start nms + ``` + +### Make scripts executable + +To run the backup and restore scripts, you need to set their permissions to make them executable. + +1. Open a secure shell (SSH) connection to the NGINX Instance Manager host and log in. +2. Navigate to the directory where the scripts are located: + + ```shell + cd /etc/nms/scripts + ``` + +3. Run the following commands to make the scripts executable: + + ```shell + sudo chmod +x backup.sh + sudo chmod +x restore.sh + sudo chmod +x support-package.sh + ``` + +### Back up and restore NGINX Instance Manager + +To back up configuration files, secrets, and databases: + +1. Open a secure shell (SSH) connection to the NGINX Instance Manager host and log in. +2. Run the following commands to back up NGINX Instance Manager: + + ```shell + cd /etc/nms/scripts + sudo ./backup.sh + ``` + + The backup will be saved as a tarball, similar to this example: `/tmp/nms-backup-.tgz`. + +To restore NGINX Instance Manager: + +1. Open a secure shell (SSH) connection to the NGINX Instance Manager host and log in. +2. 
Run the following commands to restore NGINX Instance Manager: + + ```shell + cd /etc/nms/scripts + sudo ./restore.sh /tmp/nms-backup-.tgz + ``` + +--- + +## NGINX Instance Manager deployed in a Kubernetes Cluster + +### Before you begin + +To complete the steps in this guide, ensure the following: + +- An installed version of NGINX Instance Manager + + + +- **Root Access**: + You’ll need superuser (sudo) access to run the backup and restore scripts, which use the `kubectl` command to interact with the Kubernetes API. Ensure that the root user has access to the Kubernetes cluster. + + To verify root access to the Kubernetes API, run this command: + + ```shell + sudo kubectl -n nms get pods + ``` + + If there are no errors and you see a list of running pods/nodes, root access is confirmed. + + If the root user lacks access, configure the Kubernetes API access for root or provide the Kubernetes configuration file path through the `KUBECONFIG` environment variable: + + ```shell + KUBECONFIG=/etc/kubernetes/admin.conf + ``` + + Replace `/etc/kubernetes/admin.conf` with the actual configuration path for your cluster if it differs. + +- **Utility Pod**: + Ensure the `utility` pod is installed in your Kubernetes cluster: + + 1. Update your [Helm Deployment values.yaml file]({{< relref "/nim/deploy/kubernetes/deploy-using-helm.md#configure-chart" >}}), adding the following line to enable the utility pod: + + ```yaml + global: + utility: true + ``` + + 2. [Upgrade your NGINX Instance Manager deployment]({{< relref "/nim/deploy/kubernetes/deploy-using-helm#helm-upgrade-nim" >}}). + + 3. Download the Helm chart for the installed version of NGINX Instance Manager: + + ```shell + helm repo add nginx-stable https://helm.nginx.com/stable + helm repo update + helm pull nginx-stable/nms + tar zxvf nms-.tgz + ``` + +### Back up NGINX Instance Manager + +To back up NGINX Instance Manager deployed in a Kubernetes cluster: + +1. 
Copy the backup script `k8s-backup.sh` from the extracted Helm chart to your working directory: + + ```shell + cp nms-/charts/nms-hybrid/backup-restore/k8s-backup.sh . + ``` + +2. Make the script executable: + + ```shell + chmod +x k8s-backup.sh + ``` + +3. Run the backup script: + + ```shell + ./k8s-backup.sh + ``` + + {{< note >}}The backup script does not require `sudo` permissions or the `utility` pod.{{}} + +4. The script will prompt you for the NGINX Instance Manager namespace. It will create a backup archive called `k8s-backup-.tar.gz`. + +### Full restoration to the same Kubernetes Cluster + +To restore NGINX Instance Manager to the same Kubernetes cluster: + +1. Copy the restore script `k8s-restore.sh` from the extracted Helm chart to your working directory: + + ```shell + cp nms-/charts/nms-hybrid/backup-restore/k8s-restore.sh . + ``` + +2. Make the script executable: + + ```shell + chmod +x k8s-restore.sh + ``` + +3. Copy your backup file (`k8s-backup-.tar.gz`) to the same directory as `k8s-restore.sh`. + +4. Run the restore script: + + ```shell + sudo KUBECONFIG=/etc/kubernetes/admin.conf ./k8s-restore.sh -i k8s-backup-.tar.gz -r + ``` + + If the Kubernetes configuration is different, update the path accordingly. + + {{< note >}}The restore script requires [root access]({{< relref "/nim/admin-guide/maintenance/backup-and-recovery.md#root-access" >}}).{{}} + +5. After specifying the NGINX Instance Manager namespace, the script will use the provided backup archive. + + {{< note >}}The script uses the `utility` pod to restore databases and core secrets. It stops service pods during the restoration and restarts them afterward.{{}} + +### Data-only restoration to a different Kubernetes Cluster + +To restore NGINX Instance Manager to a different Kubernetes cluster: + +1. Copy the restore script `k8s-restore.sh` from the extracted Helm chart to your working directory: + + ```shell + cp nms-/charts/nms-hybrid/backup-restore/k8s-restore.sh . + ``` + +2. 
Make the script executable: + + ```shell + chmod +x k8s-restore.sh + ``` + +3. Copy your backup file (`k8s-backup-.tar.gz`) to the same directory as `k8s-restore.sh`. + +4. Run the restore script: + + ```shell + sudo KUBECONFIG=/etc/kubernetes/admin.conf ./k8s-restore.sh -i k8s-backup-.tar.gz -r -d + ``` + + If the Kubernetes configuration differs, update the path accordingly. + + {{< note >}}The restore script requires [root access]({{< relref "/nim/admin-guide/maintenance/backup-and-recovery.md#root-access" >}}).{{}} + +5. After specifying the NGINX Instance Manager namespace, the script will restore the databases and core secrets. + +If you want to restore user passwords, extract the backup archive and run the following commands: + + ```shell + cd k8s-backup-/secrets + kubectl -n nms apply -f nms-auth.json + kubectl -n nms delete pod apigw- + ``` + +--- + +## ClickHouse + +ClickHouse supports backup and restore on versions greater than v22. + +Refer to [ClickHouse's documentation](https://clickhouse.com/docs/en/operations/backup) for backup and restore instructions. + +To check your ClickHouse version, run: + +```shell +clickhouse-server --version +``` diff --git a/content/nim/admin-guide/maintenance/sqlite-installation.md b/content/nim/admin-guide/maintenance/sqlite-installation.md new file mode 100644 index 000000000..c7a8bfaed --- /dev/null +++ b/content/nim/admin-guide/maintenance/sqlite-installation.md @@ -0,0 +1,83 @@ +--- +docs: DOCS-1270 +tags: +- docs +title: Install SQLite (for NGINX Instance Manager 2.14.1 and earlier) +toc: true +weight: 10 +--- + +## Install SQLite for your Linux distribution + +If you're using **NGINX Instance Manager version 2.14.1 or earlier**, you'll need to install SQLite to run the [backup and recovery scripts]({{< relref "/nim/admin-guide/maintenance/backup-and-recovery.md" >}}). +Follow the steps to install SQLite for your operating system. 
Note that some older versions of Linux might require additional steps, explained below. + +### CentOS, RHEL, and RPM-based distributions + +To install SQLite on your system, run the appropriate command(s) for your Linux distribution: + +#### For RHEL and RPM-based distributions (excluding CentOS 7, Amazon Linux 2, and Oracle Linux 7): + +```bash +sudo yum install -y sqlite +``` + +#### For CentOS 7, Amazon Linux 2, and Oracle Linux 7: + +```bash +sudo su +yum install -y gcc \ + make \ + automake \ + lz4-devel \ + libtool \ + diffutils \ + file + +LIBUV=1.30.0 +RAFT=0.10.0 +SQLITE3=3410100 + +curl -L -o libuv.tar.gz https://github.com/libuv/libuv/archive/refs/tags/v${LIBUV}.tar.gz && \ + tar -zxf libuv.tar.gz && cd libuv-${LIBUV} && sh autogen.sh && ./configure --prefix=/usr --enable-shared=no && make && make install && cd .. && rm -rf libuv.tar.gz libuv-${LIBUV} +curl -L -o raft.tar.gz https://github.com/canonical/raft/archive/refs/tags/v${RAFT}.tar.gz && \ + tar -zxf raft.tar.gz && cd raft-${RAFT} && autoreconf -i && ./configure --disable-lz4 --prefix=/usr --enable-shared=no && make && make install && cd .. && rm -rf raft.tar.gz raft-${RAFT} +curl -L -o sqlite3.tar.gz https://sqlite.org/2023/sqlite-autoconf-${SQLITE3}.tar.gz && \ + tar -zxf sqlite3.tar.gz && cd sqlite-autoconf-${SQLITE3} && ./configure --prefix=/usr --enable-shared=no && make && make install && cd .. 
&& rm -rf sqlite3.tar.gz sqlite-autoconf-${SQLITE3} +``` + +### Debian, Ubuntu, and Deb-based distributions + +To install SQLite on your system, run the appropriate command(s) for your Linux distribution: + +#### For Debian, Ubuntu, and Deb-based distributions (excluding Debian (buster/sid) and Ubuntu 18.04): + +```bash +sudo apt-get update +sudo apt-get install -y sqlite3 +``` + +#### For Debian (buster/sid) and Ubuntu 18.04: + +```bash +sudo su +apt-get install -y gcc \ + make \ + automake \ + liblz4-dev \ + pkg-config \ + libtool \ + diffutils \ + file + +LIBUV=1.43.0 +RAFT=0.16.0 +SQLITE3=3410100 + +curl -L -o libuv.tar.gz https://github.com/libuv/libuv/archive/refs/tags/v${LIBUV}.tar.gz && \ + tar -zxf libuv.tar.gz && cd libuv-${LIBUV} && sh autogen.sh && ./configure --prefix=/usr --enable-shared=no && make && make install && cd .. && rm -rf libuv.tar.gz libuv-${LIBUV} +curl -L -o raft.tar.gz https://github.com/canonical/raft/archive/refs/tags/v${RAFT}.tar.gz && \ + tar -zxf raft.tar.gz && cd raft-${RAFT} && autoreconf -i && ./configure --disable-lz4 --prefix=/usr --enable-shared=no && make && make install && cd .. && rm -rf raft.tar.gz raft-${RAFT} +curl -L -o sqlite3.tar.gz https://sqlite.org/2023/sqlite-autoconf-${SQLITE3}.tar.gz && \ + tar -zxf sqlite3.tar.gz && cd sqlite-autoconf-${SQLITE3} && ./configure --prefix=/usr --enable-shared=no && make && make install && cd .. 
&& rm -rf sqlite3.tar.gz sqlite-autoconf-${SQLITE3} +``` diff --git a/content/nim/admin-guide/rbac/_index.md b/content/nim/admin-guide/rbac/_index.md new file mode 100644 index 000000000..cc8b07c0f --- /dev/null +++ b/content/nim/admin-guide/rbac/_index.md @@ -0,0 +1,6 @@ +--- +title: RBAC +weight: 200 +url: /nginx-management-suite/admin-guide/rbac/ + +--- diff --git a/content/nim/admin-guide/rbac/add-rbac-with-tagging.md b/content/nim/admin-guide/rbac/add-rbac-with-tagging.md new file mode 100644 index 000000000..24a8dbcee --- /dev/null +++ b/content/nim/admin-guide/rbac/add-rbac-with-tagging.md @@ -0,0 +1,98 @@ +--- +description: Deprecated in Instance Manager 2.1.0. This topic explains how to set + up role-based access control with tags in Instance Manager. +docs: DOCS-790 +doctypes: task +draft: true +tags: +- docs +- deprecated +title: (Deprecated) Set Up RBAC with Tagging +toc: true +weight: 1000 +--- + +{{< shortversions "2.0.0" "2.0.1" "nimvers" >}} + +## Overview + +{{< deprecated >}}Adding tags to set up RBAC is deprecated in Instance Manager 2.1.0. To use the new RBAC, refer to the [Set Up RBAC]({{< relref "/nim/admin-guide/rbac/overview-rbac" >}}) guide. +{{< /deprecated >}} + +When [defining a role]({{< relref "/nim/admin-guide/authentication/basic-auth/set-up-basic-authentication.md#roles" >}}) in Instance Manager, you can use [tags]({{< relref "/nim/nginx-instances/add-tags.md" >}}) to restrict a role's permissions for groups of instances. + +To access an instance with an assigned tag, a role must have `Instance Management` permission, and the permission needs to have a tag matching the instance's. + +{{}} +Changes made to a role may take up to 10 minutes to take effect. + +Admin users can view, add, and change any system tags, as well as any access levels. Non-admin users are restricted to viewing only the roles and tags they've been assigned. + +Untagged instances can be accessed by all users that have the `Instance Management` permission. 
+{{}} + +## Set Role Permissions Using the API + +To set a role's permissions with tags using the Instance Manager Rest API, send a POST request similar to the following example to the Roles API: + +```shell +curl -X POST "https:///api/platform/v1/roles" -H "Authorization: Bearer " -H "content-type: application/json" -d " +{ + "metadata": { + "description": "Role settings for managers", + "displayName": "manager", + "name": "manager" + }, + "roleDef": { + "permissions": [ + { + "access": "READ", + "scope": "INSTANCE-MANAGEMENT", + "tags": [ + "env:prod" + ] + }, + { + "access": "WRITE", + "scope": "INSTANCE-MANAGEMENT", + "tags": [ + "env:dev" + ] + } + ] + } +}" +``` + +{{}} + +| Parameter | Type | Description | +|--------------------|-----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `permissions.access` | string | The access level determines the role's ability to access a path or object.

    The options are:

    • READ: has read-only access (HTTP GET requests)

    • WRITE: has read and write access (POST, PUT, PATCH, DELETE requests) | +| `permissions.scope` | string | Sets the scope the role has access to.

    The options are:

    • SETTINGS: has access to the Instance Manager settings APIs, including license, users, and roles

    • INSTANCE-MANAGEMENT: has access to the instance management APIs | +| `permissions.tags` | string | Tags are matched to resources in the API to determine access privileges. Tags can only be used with the INSTANCE-MANAGEMENT scope. |
    }} + +The example above defines a role with `READ` permission for instances with the `env:prod` tag and `WRITE` permission for instances with the `env:dev` tag. + +For more information about the Roles API, see the Instance Manager REST API Documentation: `https:///ui/docs`. + +## Set Role Permissions Using the Web Interface + +1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. +2. Select the **Settings** gear icon. +3. In the left menu, select **Roles**. +4. Select **Create**. +5. On the **Create Role** form, complete the following: + + - In the **Name** box, type the name of the role. + - In the **Display Name** box, type a display name for the role. + - In the **Permissions** section, select **Create**. + - In the **Scope** list, select **Instance Management**. + - In the **Access** list, select the access level for the role. The options are `Read` or `Write`. + - In the **Tags** list, select a tag or tags to apply to the role, or select **Add New Tag** to create a tag. + +6. Select **Save**. + +{{< img src="/rbac/role-create-with-tags.png" alt="Role creation: create role with tags." width="600" height="415" >}}
    diff --git a/content/nim/admin-guide/rbac/assign-roles.md b/content/nim/admin-guide/rbac/assign-roles.md new file mode 100644 index 000000000..4d966a3b3 --- /dev/null +++ b/content/nim/admin-guide/rbac/assign-roles.md @@ -0,0 +1,33 @@ +--- +docs: DOCS-1273 +doctypes: +- task +tags: +- docs +title: Assign roles to users or user groups +toc: true +weight: 210 +--- + +## Overview + +In NGINX Instance Manager, role-based access control (RBAC) lets you assign permissions to users or user groups based on their roles in the organization. This ensures that users only have access to the features they need. Before assigning roles, make sure you’ve created the necessary users or user groups. This guide covers how to assign roles to users using basic authentication and to user groups when using an OpenID Connect (OIDC) identity provider. + +## Before you begin + +{{}} +Make sure you’ve already created users or user groups that can be assigned roles. If not, follow these guides: + +- To create users, follow the instructions in [Set up basic authentication]({{< relref "/nim/admin-guide/authentication/basic-auth/set-up-basic-authentication.md" >}}). +- To create user groups, follow the steps in [Getting started with OIDC]({{< relref "/nim/admin-guide/authentication/oidc/getting-started.md" >}}). +{{}} + + + +## Assign roles to users (basic authentication) + +{{< include "/nim/rbac/assign-roles-to-users.md" >}} + +## Assign roles to user groups (OIDC) + +{{< include "/nim/rbac/assign-roles-to-user-groups.md" >}} diff --git a/content/nim/admin-guide/rbac/create-roles.md b/content/nim/admin-guide/rbac/create-roles.md new file mode 100644 index 000000000..77c28f7e9 --- /dev/null +++ b/content/nim/admin-guide/rbac/create-roles.md @@ -0,0 +1,56 @@ +--- +docs: DOCS-1272 +doctypes: +- task +tags: +- docs +title: Create and manage roles +toc: true +weight: 200 +--- + +## Overview + +NGINX Instance Manager emphasizes role-based access control (RBAC) to manage user permissions. 
A predefined `admin` role is available for initial setup and administration, but you can create custom roles to match specific responsibilities, such as for API Owners or Infrastructure Admins. This lets organizations fine-tune access and permissions to suit their needs. + +## Create roles {#create-roles} + +{{< include "nim/rbac/create-roles.md" >}} + +## Edit roles {#edit-roles} + +To modify an existing role in NGINX Instance Manager, follow these steps: + +1. In a web browser, go to the FQDN of your NGINX Instance Manager host and log in. +2. Select the **Settings** gear icon in the upper-right corner. +3. From the left navigation menu, select **Roles**. +4. From the list, select the role you want to update. +5. Select **Edit Role** and make changes to any of the editable fields if needed: + - **Display name**: an optional, user-friendly name for the role + - **Description**: an optional, brief summary of the role + +6. To add new permissions to the role: + + 1. Select **Add Permission**. + 2. In the **Module** list, select the relevant module. + 3. In the **Feature** list, select the feature you're assigning permissions for. + + 4. Select **Add Additional Access** to grant a CRUD (Create, Read, Update, Delete) access level. + + - In the **Access** list, select the access level(s) you want to assign. + + 5. Select **Save**. + +7. To edit an existing permission, select **Edit** next to the permission name. + + 1. In the **Edit Permission** form, modify the **Module**, **Feature**, or access levels as needed. + +8. After making your changes, select **Save**. + +## Next steps + +### Assign roles to users or user groups + +Once you’ve created roles, assign them to users or user groups to ensure that permissions align with responsibilities. This helps maintain clear and organized access control. 
+ +- [Assign roles to users or user groups]({{< relref "/nim/admin-guide/rbac/assign-roles.md" >}}) diff --git a/content/nim/admin-guide/rbac/manage-resource-groups.md b/content/nim/admin-guide/rbac/manage-resource-groups.md new file mode 100644 index 000000000..b1fbf6908 --- /dev/null +++ b/content/nim/admin-guide/rbac/manage-resource-groups.md @@ -0,0 +1,277 @@ +--- +docs: DOCS-1271 +doctypes: +- tutorial +tags: +- docs +title: Manage resource groups +toc: true +weight: 300 +--- + +## Overview + +Resource groups are containers that help you manage multiple resources collectively, such as instances, instance groups, and certificates. By using resource groups, you can assign RBAC (Role-Based Access Control) permissions to multiple resources as a single entity. This provides a more flexible way of managing resources compared to [Instance Groups]({{< relref "/nim/nginx-instances/manage-instance-groups.md" >}}), where all instances share the same configuration. Resource groups don’t have this limitation and allow you to manage various resources under one group for better control. + +## Before you begin + +To complete the steps in this guide, ensure you have: + +- A running version of [NGINX Instance Manager]({{< relref "/nim/deploy/_index.md" >}}). +- One or more registered NGINX data plane instances. + +{{< include "/nim/how-to-access-api-docs.md" >}} + +--- + +## Default systems set + +By default, when NGINX instances (also called systems) register with NGINX Instance Manager, they are automatically added to a resource group called `default-systems`. This default group can be used to set base-level permissions for new instances until you organize them into more specific resource groups. + +--- + +## Creating, updating, and deleting resource groups + +### Create a resource group + +#### Using the web interface + +To create a resource group using the NGINX Instance Manager web interface: + +1. Log in to the FQDN for your NGINX Instance Manager host. +2. 
In the Launchpad menu, select **Settings**. +3. From the left-side menu, select **Resource Groups**. +4. Select **Create**. +5. In the **Basic Information** form, provide the following details: + - **Name**: Enter a unique name for the resource group. + - **Description**: Optionally, provide a description for the resource group. +6. Select **Next**. +7. In the **Certs** form, select any certificates to include in the resource group, or leave this section blank if none are needed. +8. Select **Next**. +9. In the **Instance Groups** form, select any instance groups to include, or leave this section blank. +10. Select **Next**. +11. In the **Systems** form, select any instances you want to include in the resource group, or leave this section blank. +12. Select **Save** to create the resource group. + +#### Using the API + +##### HTTP request (POST) + +To create a resource group using the REST API, send an HTTP `POST` request to the Resource Groups endpoint. + +- **Method**: `POST` +- **Endpoint**: `/api/platform/v1/resource-groups` + +##### Parameters + +When creating a resource group via the API, include the following parameters: + +- **name** (required): The name of the resource group. +- **description** (optional): A brief description of the resource group. +- **resources** (optional): An array of resource objects, such as instances or instance groups. Each resource object includes: + - **module**: The module name, such as `Instance Manager`. + - **name**: The name of the resource. + - **object**: The type of object, for example, `Systems`. + - **uid**: The unique identifier of the resource. 
+ +##### Example JSON request + +```json +{ + "name": "my-resource-group", + "description": "Group for managing test instances", + "resources": [ + { + "module": "Instance Manager", + "name": "test-instance", + "object": "Systems", + "uid": "1234abcd-5678-90ef-ghij-klmnopqrstuv" + } + ] +} +``` + +--- + +### Modify an existing resource group + +#### Using the web interface + +To modify an existing resource group in the web interface: + +1. Log in to the FQDN for your NGINX Instance Manager host. +2. From the Launchpad menu, select **Settings**. +3. Select **Resource Groups** from the left-side menu. +4. Choose the resource group you want to update and click **Edit**. +5. Modify the following details as needed: + - **Name**: Update the name of the resource group. + - **Description**: Update or add a description for the resource group. +6. Select **Next** to navigate through the following sections: + - **Certs**: Add or remove certificates. + - **Instance Groups**: Add or remove instance groups. + - **Systems**: Add or remove instances. +7. Select **Save** to apply the changes. + +#### Using the API + +##### HTTP request (POST) + +To add resources to an existing resource group, send an HTTP `POST` request to the Resource Groups endpoint. + +- **Method**: `POST` +- **Endpoint**: `/api/platform/v1/resource-groups/{resourceGroupUid}/resources` + +##### Parameters + +Include the following parameters when adding resources: + +- **module** (required): The module name, such as `Instance Manager`. +- **name** (required): The name of the resource. +- **object** (required): The type of resource object (for example `Systems`). +- **uid** (required): The unique identifier of the resource. 
+ +##### Example JSON request + +```json +{ + "module": "Instance Manager", + "name": "new-instance", + "object": "Systems", + "uid": "1a2b3c4d-5678-90ef-ghij-klmnopqrst" +} +``` + +--- + +### Remove resources from a resource group + +#### Using the web interface + +To remove resources from a resource group: + +1. Log in to the FQDN for your NGINX Instance Manager host. +2. In the Launchpad menu, select **Settings**. +3. Choose **Resource Groups** from the left-side menu. +4. Select the resource group you want to modify and click **Edit**. +5. In each section (Certs, Instance Groups, Systems), uncheck the resources you want to remove. +6. Select **Save** to apply the changes. + +#### Using the API + +##### HTTP request (DELETE) + +To remove a resource from a resource group via the REST API, send an HTTP `DELETE` request. + +- **Method**: `DELETE` +- **Endpoint**: `/api/platform/v1/resource-groups/{resourceGroupUid}/resources/{resourceUid}?moduleName=Instance Manager` + +##### Example request + +```bash +DELETE https:///api/platform/v1/resource-groups/{resourceGroupUid}/resources/{resourceUid}?moduleName=Instance Manager +``` + +--- + +### Delete a resource group + +#### Using the web interface + +To delete a resource group: + +1. Log in to the FQDN for your NGINX Instance Manager host. +2. From the Launchpad menu, select **Settings**. +3. Select **Resource Groups** from the left-side menu. +4. Choose the resource group you want to delete and click **Delete**. + +#### Using the API + +##### HTTP request (DELETE) + +To delete a resource group via the REST API, send an HTTP `DELETE` request. + +- **Method**: `DELETE` +- **Endpoint**: `/api/platform/v1/resource-groups/{resourceGroupUid}` + +##### Example request + +```bash +DELETE https:///api/platform/v1/resource-groups/{resourceGroupUid} +``` + +--- + +## Viewing resource groups + +### View a list of resource groups + +#### Using the web interface + +To view a list of resource groups: + +1. 
Log in to the FQDN for your NGINX Instance Manager host. +2. From the Launchpad menu, select **Settings**. +3. In the left-side menu, select **Resource Groups** to see a list of existing groups. + +#### Using the API + +##### HTTP request (GET) + +To retrieve a list of resource groups without resource details: + +- **Method**: `GET` +- **Endpoint**: `/api/platform/v1/resource-groups?showDetails=false` + +To retrieve a list of resource groups with resource details: + +- **Method**: `GET` +- **Endpoint**: `/api/platform/v1/resource-groups?showDetails=true` + +##### Example request (showing resource details) + +```bash +GET https:///api/platform/v1/resource-groups?showDetails=true +``` + +--- + +## Putting it all together + +### Example: Managing test environment resources + +This example demonstrates how to manage resources in a test environment by creating a resource group, defining a role for resource management, and assigning that role to a user. + +#### Step 1: Create the **test-env-resources** resource group + +1. Log in to the FQDN for your NGINX Instance Manager host. +2. In the Launchpad menu, select **Settings**. +3. From the left-side menu, select **Resource Groups**. +4. Select **Create**. +5. Provide the following details: + - **Name**: Enter `test-env-resources`. + - **Description**: Enter `Test Environment Resources`. +6. Add any relevant certificates, instance groups, or systems to the group. +7. Select **Save**. + +#### Step 2: Create the **test-env-admin** role + +1. In the Launchpad menu, select **Settings**. +2. From the left-side menu, select **Roles**. +3. Select **Create**. +4. Provide the following details: + - **Name**: Enter `test-env-admin`. + - **Description**: Enter `Test Environment Admin`. +5. Add permissions for the role: + - **Certs**: Read access for all certs. + - **Instance-Groups**: Read access for all instance groups. + - **Instance-Management**: Read access for all systems. 
+ - **Resource Groups**: Full access (Create, Read, Update, Delete) for the `test-env-resources` group. +6. Select **Save**. + +#### Step 3: Assign the **test-env-admin** role to a user + +1. From the left-side menu, select **Users**. +2. Choose the user you want to assign the role to (for example, `john-doe`). +3. Select **Edit User**. +4. In the **Roles** list, select the `test-env-admin` role. +5. Select **Save**. diff --git a/content/nim/admin-guide/rbac/overview-rbac.md b/content/nim/admin-guide/rbac/overview-rbac.md new file mode 100644 index 000000000..7da6ddeb6 --- /dev/null +++ b/content/nim/admin-guide/rbac/overview-rbac.md @@ -0,0 +1,58 @@ +--- +title: "Overview: RBAC" +weight: 1 +toc: true +type: concept +product: nim +docs: DOCS-919 +--- + +## Overview + +Role-Based Access Control (RBAC) is a security system that governs access to resources within a software application. By assigning specific roles to users or groups, RBAC ensures that only authorized individuals have the ability to perform certain actions or access particular areas. + +The value of RBAC lies in its ability to provide clear and structured control over what users can see and do. This makes it easier to maintain security, streamline user management, and ensure compliance with internal policies or regulations. By giving users only the permissions they need to fulfill their roles, RBAC reduces the risk of unauthorized access and fosters a more efficient and secure operating environment. + +The following are essential concepts related to RBAC: + +- **Users**: Users are individual accounts identified by a username and credentials. You have the option to create users within F5 NGINX Instance Manager using basic authentication or to integrate with an external identity provider using OpenID Connect (OIDC). +- **Roles**: Roles are sets of permissions linked to one or more features. 
Each role specifies the actions that are allowed for each feature, such as creating, reading, updating, or deleting. The pre-defined `admin` role grants full access to all features. + + Users can have multiple roles simultaneously. In such cases, the permissions granted by each role are combined, providing an additive effect. For instance, a user with two roles, one granting read access to all NGINX instances and the other allowing create, update, and delete access to a specific instance, will be able to read all instances while having the ability to create, update, or delete only the designated instance for which they have permission. + +- **Groups**: Groups are collections of users. They are used only when integrating with external identity providers. Users from these providers can't be assigned roles directly within NGINX Instance Manager but inherit roles through membership in groups. +- **Features**: In NGINX Instance Manager, features refer to distinct functional components or capabilities that let users perform a variety of tasks and access related resources. The sections below outline the features available for NGINX Instance Manager. +- **Resource Object**: These are specific elements within a feature that can be targeted for precise access control. Essentially, a resource object is a finer-grained component within a feature that you can control access to. For example, if you are working with the Instance Management feature, you have the option to apply access control to specific entities like Resource Groups and/or Systems. This allows for more nuanced management of permissions within NGINX Instance Manager. + +## Features {#features} + +NGINX Instance Manager provides a range of capabilities called features, which system administrators can manage using role-based access control (RBAC). The availability of some features depends on your license. 
For more information on licensing, see the [Add a license]({{< relref "/nim/admin-guide/license/add-license.md" >}}) topic.
+
+### NGINX Instance Manager features
+
+#### Unlicensed
+
+- **NGINX Plus Counting**: View the number of registered NGINX Plus instances and track Kubernetes usage.
+- **Licensing**: View and manage licenses.
+- **Resource Groups**: Create, configure, and manage resource groups.
+- **User Management**: Create, configure, and manage roles, users, and user groups.
+
+#### Licensed
+
+- **Analytics**: Access analytics endpoints, including metrics, catalogs, and events.
+- **Certificates**: View and manage certificates for NGINX instances, with resources like Certs, Instance Groups, Resource Groups, and Systems.
+- **Instance Groups**: Create, configure, and manage NGINX instance groups.
+- **Instance Management**: View and manage NGINX instances within Resource Groups and Systems.
+- **Scan**: Perform scans for NGINX instances.
+- **Security Policies**: View and manage security policies for NGINX instances, which depend on Instance Management and Instance Groups for publishing.
+- **Staged Configurations**: View, create, update, and delete staged NGINX configurations for Instance Groups, Resource Groups, and Systems.
+- **Templates**: View, create, update, and delete NGINX config templates for Instance Groups, Resource Groups, Systems, and Templates.
+- **Template Submissions**: Manage NGINX config template submissions, including viewing, creating, updating, and deleting submissions for Instance Groups, Resource Groups, Systems, Templates, and Template Submissions.
+
+### Endpoints
+
+To explore the API endpoints for NGINX Instance Manager, visit:
+
+- **API Endpoints**: `https://<NIM-FQDN>/ui/docs`.
+
+Replace `<NIM-FQDN>` with the fully qualified domain name (FQDN) of your NGINX Instance Manager host.
diff --git a/content/nim/deploy/_index.md b/content/nim/deploy/_index.md new file mode 100644 index 000000000..6b3a1c0b8 --- /dev/null +++ b/content/nim/deploy/_index.md @@ -0,0 +1,8 @@ +--- +title: Deploy +description: +weight: 20 +url: /nginx-instance-manager/deploy/ +cascade: + type: "nim-r33" +--- \ No newline at end of file diff --git a/content/nim/deploy/docker/_index.md b/content/nim/deploy/docker/_index.md new file mode 100644 index 000000000..f93bf8404 --- /dev/null +++ b/content/nim/deploy/docker/_index.md @@ -0,0 +1,5 @@ +--- +title: Docker +weight: 10 +url: /nginx-instance-manager/deploy/docker +--- \ No newline at end of file diff --git a/content/nim/deploy/docker/deploy-nginx-instance-manager-docker-compose.md b/content/nim/deploy/docker/deploy-nginx-instance-manager-docker-compose.md new file mode 100644 index 000000000..1ca98e9ab --- /dev/null +++ b/content/nim/deploy/docker/deploy-nginx-instance-manager-docker-compose.md @@ -0,0 +1,209 @@ +--- +categories: +- +date: "2024-11-06T12:00:00-07:00" +description: +doctypes: +- deployment guide +tags: +- docs +title: "Deploy using Docker Compose" +toc: true +versions: [] +weight: 100 +docs: "DOCS-1653" +--- + + +## Overview + +This guide will show you how to deploy and use F5 NGINX Instance Manager in Docker using [Docker Compose](https://docs.docker.com/compose/). + +This NGINX Instance Manager docker compose deployment is a single Docker image containing NGINX Instance Manager, Security Monitoring, and the latest App Protect compilers, which is orchestrated using a Docker Compose docker-compose.yaml definition. + +The ClickHouse database is deployed in a separate container to improve resilience and make this a fault tolerant solution. 
You can also configure persistent storage + +--- + +## What you need + +- A working version of [Docker](https://docs.docker.com/get-docker/) +- Your NGINX Instance Manager subscription's JSON Web Token from [MyF5](https://my.f5.com/manage/s/subscriptions) You can use the same JSON Web Token as NGINX Plus in your MyF5 portal. +- This pre-configured `docker-compose.yaml` file: + - {{}} {{}}. + +--- + +## Minimum requirements + +Deploying NGINX Instance Manager with docker requires a minimum of 4 CPU cores and 4 GB of memory for basic use cases. However, every environment is unique, primarily due to variations in the NGINX configurations being managed. For instance, managing NGINX instances with hundreds of configuration files or those with WAF (NGINX App Protect) enabled can significantly increase resource demands. + +If your use case is limited to usage tracking without active management or agent communication, the minimum requirements should suffice. For more complex deployments, we recommend reviewing the technical specifications guide to ensure the resources allocated are sufficient to handle an increased workload, particularly for the ClickHouse database, which may need to manage a higher volume of reads and writes. + +## Before you start + +{{< include "/nim/decoupling/note-legacy-nms-references.md" >}} + +### Set up Docker for NGINX container registry + +To set up Docker to communicate with the NGINX container registry located at `private-registry.nginx.com`, follow these steps: + +{{< include "/nim/docker/docker-registry-login.md" >}} + +### Compose deployment + +Navigate to the directory where you downloaded `docker-compose.yaml`. With the following commands, use docker to log in to private-registry.nginx.com and then run `docker compose up -d`. 
+
+```shell
+~$ docker login private-registry.nginx.com --username= --password=none
+~$ echo "admin" > admin_password.txt
+~$ docker compose up -d
+[+] Running 6/6
+ ✔ Network nim_clickhouse Created 0.1s
+ ✔ Network nim_external_network Created 0.2s
+ ✔ Network nim_default Created 0.2s
+ ✔ Container nim-precheck-1 Started 0.8s
+ ✔ Container nim-clickhouse-1 Healthy 6.7s
+ ✔ Container nim-nim-1 Started 7.4s
+```
+
+### Supported environment variables
+
+You may modify the following variables in the `docker-compose.yaml` file:
+
+- `NIM_LOG_LEVEL` - set the NGINX Instance Manager logging level.
+- `NIM_METRICS_TTL` - set a custom time-to-live in days value for metrics retention.
+- `NIM_EVENTS_TTL` - set a custom time-to-live in days value for events retention.
+- `NIM_SECURITY_TTL` - set a custom time-to-live in days value for security violation retention.
+- `NIM_MAINTENANCE` - enable maintenance mode to perform backup, restore, and troubleshooting.
+- `NIM_WATCHDOG_TIMEOUT` - set a custom dpm watchdog timeout in seconds.
+- `NIM_LICENSE_MODE_OF_OPERATION` - set the NGINX Instance Manager license mode of operation to either connected or disconnected. Default is connected.
+
+### Compose stop or tear down
+
+Navigate to the directory where you downloaded `docker-compose.yaml`. If you started NIM with `docker compose up -d`, stop NIM services once you've finished with them by running `docker compose stop`. You can bring everything down, removing the containers entirely, with the `docker compose down` command.
+ +```shell +docker compose down +``` +``` +[+] Running 6/6 + ✔ Container nim-nim-1 Removed 30.6s + ✔ Container nim-clickhouse-1 Removed 1.4s + ✔ Container nim-precheck-1 Removed 0.0s + ✔ Network nim_default Removed 0.9s + ✔ Network nim_external_network Removed 0.4s + ✔ Network nim_clickhouse Removed 0.6s +``` + +--- + +## Secrets + +In the same `docker-compose.yaml` file, you can modify the following credentials: + +Set the admin password (required) + +```yaml +secrets: + nim_admin_password: + file: admin_password.txt +``` + +Pass a custom `.htpasswd` file (Optional) + +```yaml + nim_credential_file: + file: nim_creds.txt +``` + +Optionally, you can also set the external SSL certificate, key, and CA files, in PEM format for the NGINX Instance Manager Ingress proxy. + +```yaml +secrets: + nim_proxy_cert_file: + file: ./certs/nim_cert.pem + nim_proxy_cert_key: + file: ./certs/nim_key.pem + nim_proxy_ca_cert: + file: ./certs/nim_ca.pem +``` + +--- + +## Backup + +Once you've set up your Docker containers, use the following command to back them up: + +```shell +~$ docker exec nim-nim-1 nim-backup +... +Backup has been successfully created: /data/backup/nim-backup-.tgz +``` + +If your system uses named volumes, inspect the `Mountpoint`. Alternatively, if you're using a shared NFS volume, then collect the data directly from the mount point. + +```shell +~/compose$ docker inspect volume nim_nim-data | jq '.[0].Mountpoint' +"/var/lib/docker/volumes/nim_nim-data/_data" +ubuntu@ip-
    :~/compose$ sudo ls -l /var/lib/docker/volumes/nim_nim-data/_data/backup +-rw-r--r-- 1 root root 5786953 Sep 27 02:03 nim-backup-.tgz +``` + +--- + +## Restore + +Before you can restore a backup, set your containers to maintenance mode in the same `docker-compose.yaml` file: + +```yaml + environment: + NIM_MAINTENANCE: "true" +``` + +```shell +~$ docker exec nim-nim-1 nim-restore /data/backup/nim-backup-.tgz +... +NGINX Instance Manager has been restored. +``` + +Once the process is complete set `NIM_MAINTENANCE` to `false` and then run `docker-compose up -d`. + +--- + +## Storage + +By default, the storage uses named volumes. Alternatively, you can use optional `driver_opts` settings to support other storage formats such as NFS. +For all storage volumes, make sure to mount them, before running `docker compose up -d`. For a mounted NFS volume, you might use the following commands: + +```shell +~$ sudo mount -t nfs <>:/mnt/nfs_share/clickhouse /mnt/nfs_share/clickhouse +~$ sudo mount -t nfs <>:/mnt/nfs_share/data /mnt/nfs_share/data +``` + +```yaml +volumes: + # By default docker compose will create a named volume + # Refer to https://docs.docker.com/reference/compose-file/volumes/ for additional storage options such as NFS + nim-data: + driver: local + driver_opts: + type: "nfs" + o: "addr=<>,rw" + device: ":/mnt/nfs_share/data" + clickhouse-data: + driver: local + driver_opts: + type: "nfs" + o: "addr=<>,rw" + device: ":/mnt/nfs_share/clickhouse" +``` + +--- + +## Support Data + +In case of problems, it's a good practice to: + +- Collect logs `docker-compose logs --since 24h > my-logs-$(date +%Y-%m-%d).txt` +- Collect backup information `docker exec nim-nim-1 nim-backup` diff --git a/content/nim/deploy/docker/deploy-nginx-instance-manager-docker.md b/content/nim/deploy/docker/deploy-nginx-instance-manager-docker.md new file mode 100644 index 000000000..f392449ec --- /dev/null +++ b/content/nim/deploy/docker/deploy-nginx-instance-manager-docker.md @@ -0,0 
+1,309 @@ +--- +categories: +- +date: "2024-06-06T12:00:00-07:00" +description: +doctypes: +- deployment guide +tags: +- docs +title: "Deploy in a single container (demo only)" +toc: true +versions: [] +weight: 100 +docs: "DOCS-1652" +--- + +{{< call-out "important" "Single container deployment not suitable for production" "fa-solid fa-triangle-exclamation" >}} +This single container deployment is intended for demo purposes and small-scale deployments only. It is not recommended for production environments. +{{}} + +## Overview + +This guide will show you how to deploy and use F5 NGINX Instance Manager with Docker. The NGINX Instance Manager container is a single Docker image that includes all dependencies, making it easy to quickly set up NGINX Instance Manager with NGINX Open Source. + +This deployment is ideal for: + +- Product demos +- Instance counting +- Small-scale environments (20 instances or fewer) + +{{< call-out "important" "This Docker option only works for Instance Manager 2.17 and will no longer be supported" "fa-solid fa-triangle-exclamation" >}} +Please use the [Docker Compose]({{< relref "/nim/deploy/docker/deploy-nginx-instance-manager-docker-compose.md" >}}) option for Instance Manager 2.18 or later, as it includes better resiliency and fault tolerance. +{{< /call-out >}} + +By the end of this guide, you'll be able to: + +- Perform a quick test without persistent storage. +- Persist data to a volume. +- Set the admin password with an environment variable. +- Override self-signed API gateway certificates. +- Configure user access to the container using an `.htpasswd` file. + +--- + +## What you need + +- A working version of [Docker](https://docs.docker.com/get-docker/) +- Your NGINX Instance Manager subscription's JSON Web Token from [MyF5](https://my.f5.com/manage/s/subscriptions). You can use the same JSON Web Token as NGINX Plus in your MyF5 portal. 
+ +--- + +## Before you start + +{{< include "/nim/decoupling/note-legacy-nms-references.md" >}} + +### Set up Docker for NGINX container registry + +To set up Docker to communicate with the NGINX container registry located at `private-registry.nginx.com`, follow these steps: + +{{< include "/nim/docker/docker-registry-login.md" >}} + +### Data persistence + +- A single volume mount is required to persist the NGINX Instance Manager databases. For example: `--volume=/myvolume/nim:/data` +- An optional volume can be used to add a custom `.htpasswd` file for admin and user authentication. For example: `--volume=/myvolume/pass/.htpasswd:/.htpasswd` + +### Supported environment variables + +- `NMS_PERSIST_DISABLE`: Do not persist data to a volume. All data will be lost after the container stops or restarts. +- `NMS_ADMIN_PASSWORD`: Set an admin password. +- `NMS_APIGW_CERT`: Override the API gateway self-signed certificate. +- `NMS_APIGW_KEY`: Override the API gateway self-signed key. +- `NMS_APIGW_CA`: Override the API gateway self-signed CA. +- `LOG_LEVEL`: Set the logging level for NGINX Instance Manager. + +--- + +## Build examples + +### Quick test without persistent storage + +1. Run the following Docker command, replacing the placeholders with the appropriate values: + - ``: desired hostname + - ``: password for the admin account + - ``: specific release version you want to use (**note:** `latest` is not supported) + + ```bash + docker run -it --rm \ + --hostname= \ + -e NMS_PERSIST_DISABLE \ + -e NMS_ADMIN_PASSWORD="" \ + -p 8443:443 \ + private-registry.nginx.com/nms/nim-bundle: + ``` + +
    + + {{< call-out "tip" "Example:" "fas fa-terminal" >}} + To pull the NGINX Instance Manager 2.17.0 image, set the hostname to "mynim," and set the admin password to "abc123\!@", run: + + ```bash + docker run -it --rm \ + --hostname=mynim \ + -e NMS_PERSIST_DISABLE \ + -e NMS_ADMIN_PASSWORD="abc123\!@" \ + -p 8443:443 \ + private-registry.nginx.com/nms/nim-bundle:2.17.0 + ``` + + {{< /call-out >}} + +2. Upload the license: + - In a web browser, go to `https://:8443` and log in. Replace `` with the actual IP address or hostname of the machine running the Docker container. If you are accessing it locally, use `https://localhost:8443`. + - Select the **Settings** gear icon. + - On the **Settings** menu, select **Licenses**. + - Select **Get Started**. + - Select **Browse** to upload the license, or simply drag and drop the license onto the form. + - Select **Add**. +3. Close the browser to completely log off. +4. Stop and restart the container. +5. Log back in and verify that the license isn't applied. + +--- + +### Set up persistent storage + +1. Create or mount a directory for persistent storage to keep your data if the container restarts. +2. Run the following Docker command, replacing the placeholders with the appropriate values: + - ``: desired hostname + - ``: password for the admin account + - ``: path to the persistent data directory on the host machine + - ``: specific release version you want to use (**note:** `latest` is not supported) + + ```bash + docker run -it --rm \ + --hostname= \ + -e NMS_ADMIN_PASSWORD="" \ + --volume=:/data \ + -p 8443:443 \ + private-registry.nginx.com/nms/nim-bundle: + ``` + +
    + + {{< call-out "tip" "Example:" "fas fa-terminal" >}} + To pull the NGINX Instance Manager 2.17.0 image, set the hostname to "mynim," set the admin password to "abc123\!@", and write data to `~/nms_storage`, run: + + ```bash + docker run -it --rm \ + --hostname=mynim \ + -e NMS_ADMIN_PASSWORD="abc123\!@" \ + --volume=/myvolume/nim-storage:/data \ + -p 8443:443 \ + private-registry.nginx.com/nms/nim-bundle:2.17.0 + ``` + + {{< /call-out >}} + +3. Upload the license: + - In a web browser, go to `https://:8443` and log in. Replace `` with the actual IP address or hostname of the machine running the Docker container. If you are accessing it locally, use `https://localhost:8443`. + - Select the **Settings** gear icon. + - On the Settings menu, select **Licenses**. + - Select **Get Started**. + - Select **Browse** to upload the license, or simply drag and drop the license onto the form. + - Select **Add**. + - Select **Done**. +4. Close the browser to completely log off. +5. Stop and restart the container. +6. Log back in and verify that the license is still applied. + +--- + +### Override self-signed API gateway certificates + +1. Ensure you have access to the required certificates: + - `mycert.pem` + - `mykey.pem` + - `myca.pem` + +2. Run the following Docker command, replacing the placeholders with the appropriate values: + - ``: desired hostname + - ``: password for the admin account + - ``: path to the persistent data directory on the host machine + - ``: specific release version you want to use (**Note:** `latest` is not supported) + + ```bash + docker run -it --rm \ + --hostname= \ + -e NMS_ADMIN_PASSWORD="" \ + -e NMS_APIGW_CERT="$(cat mycert.pem)" \ + -e NMS_APIGW_KEY="$(cat mykey.pem)" \ + -e NMS_APIGW_CA="$(cat myca.pem)" \ + --volume=:/data \ + -p 8443:443 private-registry.nginx.com/nms/nim-bundle: + ``` + +
    + + {{< call-out "tip" "Example:" "far fa-terminal" >}} + To pull the NGINX Instance Manager 2.17.0 image, set the hostname to "mynim," use the password "abc123!@", pass in the certificates `mycert.pem`, `mykey.pem`, and `myca.pem`, and write data to `/myvolume/nim-storage`, run: + + ```bash + docker run -it --rm \ + --hostname=mynim \ + -e NMS_ADMIN_PASSWORD="abc123\!@" \ + -e NMS_APIGW_CERT="$(cat mycert.pem)" \ + -e NMS_APIGW_KEY="$(cat mykey.pem)" \ + -e NMS_APIGW_CA="$(cat myca.pem)" \ + --volume=/myvolume/nim-storage:/data \ + -p 8443:443 \ + private-registry.nginx.com/nms/nim-bundle:2.17.0 + ``` + + {{}} + +3. Log in and verify that the certificates are applied correctly. + + In a web browser, go to `https://:8443` and log in. Replace `` with the actual IP address or hostname of the machine running the Docker container. If you are accessing it locally, use `https://localhost:8443`. + +--- + +### Create and use an `.htpasswd` file + +In previous examples, the admin password was set using the `NMS_ADMIN_PASSWORD` environment variable. You can also set passwords for the admin and other users using an `.htpasswd` file. + +1. Create an `.htpasswd` file on the host machine with an admin user. You will be prompted to enter a password: + + ```bash + htpasswd -c .htpasswd admin + ``` + +2. To add more user passwords, use one of the following commands depending on the desired hashing method: + + {{< call-out "important" "Required: Create new users in the web interface" "fa-solid fa-circle-exclamation" >}} + Additional users must be created using the web interface first. If users are added only to the `.htpasswd` file and not in the web interface, they will not be able to log in. The web interface creates the users but doesn't support adding passwords, while the `.htpasswd` file adds the passwords but doesn't create the users. 
For instructions on adding users using the web interface, see [Creating Users]({{< relref "/nim/admin-guide/authentication/basic-auth/set-up-basic-authentication.md#create-users" >}}). + {{}} + + - For MD5 hash: + + ```bash + htpasswd -m .htpasswd user1 + ``` + + - For SHA hash: + + ```bash + htpasswd -s .htpasswd user2 + ``` + + {{}}NGINX does not support bcrypt password hashing.{{}} + +3. To pass the `.htpasswd` file at runtime, run the following command, replacing the placeholders with the appropriate values: + - ``: desired hostname + - ``: path to the directory containing the `.htpasswd` file on the host machine + - ``: path to the persistent data directory on the host machine + - ``: specific release version you want to use (**Note:** `latest` is not supported) + + ```bash + docker run -it --rm \ + --hostname= \ + --volume=/.htpasswd:/.htpasswd \ + --volume=:/data \ + -p 8443:443 private-registry.nginx.com/nms/nim-bundle: + ``` + + {{}}The admin user must be included in the `.htpasswd` file, or the container will not start.{{}} + +
    + + {{< call-out "tip" "Example:" "far fa-terminal" >}} + To pull the NGINX Instance Manager 2.17.0 image, set the hostname to "mynim," pass in the `/myvolume/nim-auth/.htpasswd` file, and write data to `/myvolume/nim-storage`, run: + + ```bash + docker run -it --rm \ + --hostname=mynim \ + --volume=/myvolume/nim-auth/.htpasswd:/.htpasswd \ + --volume=/myvolume/nim-storage:/data \ + -p 8443:443 private-registry.nginx.com/nms/nim-bundle:2.17.0 + ``` + + {{}} + +4. To copy an updated `.htpasswd` file to a running container, use the following command, replacing the placeholders with the appropriate values: + - ``: path to the directory containing the `.htpasswd` file on the host machine + - ``: name of the running container + + ```bash + docker cp /.htpasswd :/data/local-auth/.htpasswd + ``` + +
    + + {{< call-out "tip" "Example:" "far fa-terminal" >}} + + ```bash + docker cp /home/ubuntu/nim-auth/.htpasswd nginx-instance:/data/local-auth/.htpasswd + ``` + + {{}} + +
    + + {{}} + To find a container's name, use the `docker ps` command, which lists all running containers along with their names. + {{}} + +5. Verify you can log in with the provided usernames and passwords. + + In a web browser, go to `https://:8443` and log in. Replace `` with the actual IP address or hostname of the machine running the Docker container. If you are accessing it locally, use `https://localhost:8443`. diff --git a/content/nim/deploy/docker/deploy-nginx-plus-and-agent-docker.md b/content/nim/deploy/docker/deploy-nginx-plus-and-agent-docker.md new file mode 100644 index 000000000..0e46a050e --- /dev/null +++ b/content/nim/deploy/docker/deploy-nginx-plus-and-agent-docker.md @@ -0,0 +1,203 @@ +--- +docs: "DOCS-1654" +doctypes: +- task +tags: +- docs +title: "Deploy NGINX Plus in a container (data plane)" +toc: true +weight: 200 +--- + +## Overview + +This guide explains how to: + +- get official NGINX Plus Docker container images and add them to your private registry +- start the container using NGINX Agent-specific parameters +- add container instances to F5 NGINX Instance Manager + +--- + +## Before you begin + +Before you start, make sure that: + +- NGINX Instance Manager is [installed]({{< ref "/nim/deploy/vm-bare-metal/install.md" >}}). + + {{}} When installing and configuring NGINX Instance Manager, remember the domain name/IP address and the gRPC port number. You will need them to configure the NGINX Agent to communicate with NGINX Instance Manager. + {{}} +- You have the JSON Web Token (JWT) from MyF5 Customer Portal. The [download instructions](#download-jwt) are below. +- The [Docker Engine](https://docs.docker.com/engine/install/) command-line tool is installed. +- Your private Docker registry is configured and running. 
+ +#### Download the JWT from MyF5 {#download-jwt} + +{{< include "licensing-and-reporting/download-jwt-from-myf5.md" >}} + +--- + +## About NGINX Plus Docker registry + +The NGINX Plus Docker registry is available at `https://private-registry.nginx.com/v2/`. It contains the following Docker container images: + +- [NGINX Plus]({{< ref "/nginx/releases.md" >}}): + `https://private-registry.nginx.com/v2/nginx-plus/base` + +- [Unprivileged]({{< ref "/nginx/admin-guide/installing-nginx/installing-nginx-plus.md#nginx-plus-unprivileged-installation" >}}) installation of NGINX Plus: + `https://private-registry.nginx.com/v2/nginx-plus/rootless-base` + +- NGINX Plus bundled with [NGINX Agent](https://docs.nginx.com/nginx-agent/overview/): + `https://private-registry.nginx.com/v2/nginx-plus/agent` + +- Unprivileged installation of NGINX Plus and NGINX Agent: + `https://private-registry.nginx.com/v2/nginx-plus/rootless-agent` + +The images can be targeted for a particular operating system and NGINX Plus release using tags. + +### Tags for operating systems + +{{}} +| Operating system | Basic OS tag | Tag examples | +|--------------------------------------------|--------------|-----------------------------------------------| +| Alpine (x86_64, aarch64) | `alpine` | `r32-alpine`, `r32-alpine-3.20` | +| Debian (x86_64, aarch64) | `debian` | `r32-debian`, `r32-debian-bookworm` | +| Red Hat Enterprise Linux (x86_64, aarch64) | `ubi` | `r32-ubi`, `r32-ubi-9`, `r32-ubi-9-20240624` | +{{}} + +### Tags for NGINX Plus versions + +The NGINX Plus registry contains images for the two most recent versions of NGINX Plus. The basic operating system tag returns the latest version of NGINX Plus built for the latest version of this operating system. + +{{}} +`nginx-plus-r32-ubi-9`, `nginx-plus-r31-alpine-3.19`. 
+{{}} + +### Listing all tags + +For a complete tag list for NGINX Plus bundled with NGINX Agent images, use the command: + +```shell +curl https://private-registry.nginx.com/v2/nginx-plus//tags/list --key --cert | jq +``` + +where: +- the `` is the location of images in NGINX Plus private registry: `base`, `rootless-base`, `agent`, `rootless-agent` +- the `` is a local path to your client key from MyF5, for example, `/etc/ssl/nginx/nginx-repo-x12345.key` +- the `` is a local path to your client certificate from MyF5, for example,`/etc/ssl/nginx/nginx-repo-x12345.crt` +- the `jq` command is used to format the JSON output for easier reading and requires the [jq](https://jqlang.github.io/jq/) JSON processor to be installed. + +--- + +## Getting an image from the registry + +1. Log in to the NGINX Plus registry. Replace `YOUR_JWT_HERE` with your actual JWT obtained from MyF5: + + ```shell + sudo docker login private-registry.nginx.com --username=YOUR_JWT_HERE --password=none + ``` + +2. Pull the NGINX Plus image from the private registry. Use the NGINX Plus bundled with NGINX Agent image (the `agent` subdirectory) to enable connectivity with NGINX Instance Manager. Replace `VERSION_TAG` with the specific version tag you need. + + ```shell + sudo docker pull private-registry.nginx.com/nginx-plus/agent:VERSION_TAG + ``` +
    + + {{}} + To pull the latest version of NGINX Plus with NGINX Agent image for Debian or Ubuntu, use the command: + + ```shell + sudo docker pull private-registry.nginx.com/nginx-plus/agent:debian + ``` + {{}} + +--- + +## Adding image to private Docker registry + +After pulling the image, tag it and upload it to your private registry. + +1. Log in to your private registry: + + ```shell + docker login + ``` + +2. Tag the image. Replace `` with your registry’s path and `` with the your NGINX Plus version, OS version, or both: + + ```shell + docker tag private-registry.nginx.com/nginx-plus/agent: /nginx-plus/agent: + ``` + +3. Push the image to the private registry and tag it: + + ```shell + docker push /nginx-plus/agent: + ``` + +--- + +## Launching Docker container + +Start the Docker container from your private registry. You will also need to pass NGINX Agent connection settings as environment variables (`--env=$variable`) to enable connectivity with NGINX Instance Manager: + +```shell +sudo docker run \ +--env=NGINX_AGENT_SERVER_GRPCPORT=443 \ +--env=NGINX_AGENT_SERVER_HOST=127.0.0.1 \ +--env=NGINX_AGENT_TLS_ENABLE=true \ +--env=NGINX_AGENT_TLS_SKIP_VERIFY=false \ +--restart=always \ +--runtime=runc \ +-d /nginx-plus/agent: +``` + +where: + + - `NGINX_AGENT_SERVER_GRPCPORT` sets a GRPC port used by NGINX Agent to communicate with NGINX Instance Manager. + - `NGINX_AGENT_SERVER_HOST` sets the domain name or IP address of NGINX Instance Manager. Note that for production environments it is not recommended to expose NGINX Instance Manager to public networks. + - `NGINX_AGENT_TLS_ENABLE` and `NGINX_AGENT_TLS_SKIP_VERIFY` enable mutual TLS, server-side TLS, or insecure mode (not recommended for production environments). See [Encrypt communication](https://docs.nginx.com/nginx-agent/configuration/encrypt-communication/) for details. + - `` is the path to your private registry. + - `` is the tag assigned when pushing to your registry. 
+
+A full list of CLI flags with their default values can be found in [CLI flags and environment variables]({{< ref "/nms/nginx-agent/install-nginx-agent.md#cli-flags-and-environment-variables" >}}).
+
    + +## Connecting NGINX Plus from container to NGINX Instance Manager + +1. In a web browser, enter the address for your NGINX Instance Manager, for example, `https://127.0.0.1/ui/`, and log in. + +2. In the **Modules** section, select **Instance Manager**: + +3. Search for live hosts with NGINX Open Source or NGINX Plus. + + - In the left menu, Select **Scan**. + - In the **CIDR** field, specify the mask of the target network, for example, `172.17.0.1/27`. + - In the **Port Ranges** field, specify one or more ports or port ranges separated by a comma, for example, `80,443,8000-8090`. + - Deselect the **ICMP** checkbox if Internet Control Message Protocol (ICMP) echo requests are disabled in your network. + - Select **Scan**. + +4. When the scanning is finished, you will get the list of your NGINX Plus managed and unmanaged instances, including instances running in containers: + +NGINX Plus instances that can be managed by NGINX Instance Manager can be accessed from the **NGINX Plus** tab. + +You can also scan for NGINX instances using the NGINX Instance Manager API; for more information, refer to [Scan NGINX Instances]({{< ref "/nim/nginx-instances/scan-instances.md" >}}). 
+ +### Troubleshooting unmanaged instances + +If the instance appears as "unmanaged", check if: + +- the NGINX Plus Docker image includes NGINX Agent +- the container was started with correct `--env=` parameters of NGINX Agent, for example, `--env=NGINX_AGENT_SERVER_GRPCPORT=443` +- there are connectivity issues between NGINX Instance Manager and the network running NGINX Plus instances (for example, ports `80` and `443` are open in the firewall) + + + + +--- + +## See also + +- [Deploying NGINX and NGINX Plus with Docker]({{< ref "/nginx/admin-guide/installing-nginx/installing-nginx-docker.md" >}}) +- [Full list of agent environment variables]({{< ref "/nms/nginx-agent/install-nginx-agent.md#cli-flags-and-environment-variables" >}}) diff --git a/content/nim/deploy/infrastructure-as-code/_index.md b/content/nim/deploy/infrastructure-as-code/_index.md new file mode 100644 index 000000000..5a5c16417 --- /dev/null +++ b/content/nim/deploy/infrastructure-as-code/_index.md @@ -0,0 +1,6 @@ +--- +title: Infrastructure as Code +weight: 300 +url: /nginx-instance-manager/deploy/infrastructure-as-code/ +draft: false +--- diff --git a/content/nim/deploy/infrastructure-as-code/build-and-deploy.md b/content/nim/deploy/infrastructure-as-code/build-and-deploy.md new file mode 100644 index 000000000..8c7747dd0 --- /dev/null +++ b/content/nim/deploy/infrastructure-as-code/build-and-deploy.md @@ -0,0 +1,84 @@ +--- +docs: DOCS-1247 +doctypes: + - task +tags: + - docs +title: Build and deploy images +toc: true +weight: 300 +--- + +{{< include "/nim/decoupling/note-legacy-nms-references.md" >}} + +## Overview + +This guide provides step-by-step instructions for building and deploying F5 NGINX Instance Manager images on different cloud providers. + +The deployment process has two stages: + +- Generate an image using Packer. +- Deploy the image using Terraform. 
+ +{{< call-out "tip" "Open-Source Project on GitHub" "fa-brands fa-github" >}} +The steps in this guide refer to the [NGINX Instance Manager Infrastructure as Code (IAC)](https://github.com/nginxinc/nginx-management-suite-iac) project on GitHub. +{{< /call-out >}} + +--- + +## Before you begin + +Before you start building and deploying images, ensure you: + +- **Install Packer requirements**: Follow the instructions in the [NGINX Instance Manager Image Generation](https://github.com/nginxinc/nginx-management-suite-iac/tree/main/packer#requirements) README. +- **Install Terraform requirements**: Follow the instructions in the [NGINX Instance Manager Image Deployment](https://github.com/nginxinc/nginx-management-suite-iac/tree/main/terraform#requirements) README. + +--- + +## Packer + +The Packer stage involves building the cloud image and installing NGINX Instance Manager using an [Ansible role](https://github.com/nginxinc/ansible-role-nginx-management-suite). This image will be used later in the deployment stage. + +### Generate the image + +To generate the image, follow the steps appropriate for your cloud deployment in the [NGINX Instance Manager Image Generation](https://github.com/nginxinc/nginx-management-suite-iac/tree/main/packer#how-to-use) README. + +--- + +## Terraform + +The Terraform stage involves deploying the cloud images created during the Packer stage. There are two types of deployment examples: Basic Reference Architecture and Standalone Architecture. + +Ensure you've built the relevant images with Packer before continuing. For the Basic Reference Architecture, you'll need both NGINX Instance Manager and NGINX images. + +### Deploy basic reference architecture image + +The Basic Reference Architecture deploys both the control plane (NGINX Instance Manager) and data plane (NGINX Agent) using cloud best practices.
It includes: + +- Load balancers +- NGINX Instance Manager and NGINX Agent instances in the private subnet + +To deploy the Basic Reference Architecture, follow the steps in the [AWS NGINX Instance Manager Basic Reference Architecture](https://github.com/nginxinc/nginx-management-suite-iac/blob/main/terraform/basic-reference/aws/README.md) README. + +
    + +{{< img src="img/iac/aws-infrastructure.png" caption="Figure 1. AWS NGINX Instance Manager basic reference architecture" alt="Diagram showing the AWS basic reference architecture with an Amazon VPC, load balancers, and NGINX Instance Manager components in private subnets." >}} + +### Deploy standalone architecture image + +The Standalone Architecture deploys the control plane in isolation. This is not a best practice solution but can be used as a simple deployment option for multiple clouds. The standalone architecture includes: + +- Control node in the VPC's public subnet +- Security groups that provide restricted access + +To deploy the Standalone Architecture, follow the steps for your infrastructure in the [How to Use](https://github.com/nginxinc/nginx-management-suite-iac/tree/main/terraform#how-to-use) section of the NGINX Instance Manager Image Deployment README. + +
    + +{{< img src="img/iac/standalone-architecture.png" caption="Figure 2. NGINX Instance Manager standalone architecture" alt="Diagram showing the standalone architecture for NGINX Instance Manager deployment, featuring an Amazon VPC and a public subnet with NGINX Instance Manager in an EC2 instance secured by a security group." >}} + +--- + +## Suggested reading + +- [Terraform Best Practices](https://developer.hashicorp.com/terraform/cloud-docs/recommended-practices) \ No newline at end of file diff --git a/content/nim/deploy/infrastructure-as-code/configuration.md b/content/nim/deploy/infrastructure-as-code/configuration.md new file mode 100644 index 000000000..158188634 --- /dev/null +++ b/content/nim/deploy/infrastructure-as-code/configuration.md @@ -0,0 +1,79 @@ +--- +docs: DOCS-1248 +doctypes: +- task +tags: +- docs +title: Install with Ansible +toc: true +weight: 200 +--- + +{{< include "/nim/decoupling/note-legacy-nms-references.md" >}} + +## Introduction + +This guide explains how to install F5 NGINX Instance Manager using the open-source Ansible role. + +With Ansible, you can automate and replicate your installation across multiple environments. + +{{< call-out "tip" "Open-Source Project on GitHub" "fa-brands fa-github" >}} +The steps in this guide refer to the [Ansible NGINX Instance Manager Role](https://github.com/nginxinc/ansible-role-nginx-management-suite) project on GitHub. +{{< /call-out >}} + +--- + +## Overview + +The Ansible role for NGINX Instance Manager simplifies the installation process by installing all the prerequisites and any modules you specify. + +{{< img src="img/iac/ansible-flow.png" caption="Figure 1. 
NGINX Instance Manager Ansible flow" alt="A diagram showing the installation flow of the NGINX Instance Manager Ansible role, with four steps: installing NGINX, installing ClickHouse, adding the NGINX Instance Manager repository, and installing the NGINX Instance Manager module(s).">}} + +--- + +## System requirements + +- The Ansible role requirements can be viewed [on GitHub](https://github.com/nginxinc/ansible-role-nginx-management-suite#requirements). + +--- + +## Installation steps + +1. Install Ansible by following the [installation steps on GitHub](https://github.com/nginxinc/ansible-role-nginx-management-suite?tab=readme-ov-file#ansible). +2. Create the inventory file with the details of the host you want to install NGINX Instance Manager on. Make sure you have access to the host. [Example here](https://github.com/nginxinc/ansible-role-nginx-management-suite?tab=readme-ov-file#create-inventory-file). +3. Create the requirements file and install the required Ansible role by following [these steps](https://github.com/nginxinc/ansible-role-nginx-management-suite?tab=readme-ov-file#install-required-roles-and-collections). + +4. Create and run the Ansible playbook. Create a file named `nms-playbook.yml` (or any other name) with contents similar to the following example: + + ```yaml + - hosts: nms + become: yes + vars: + nginx_license: + certificate: ./nginx-repo.crt + key: ./nginx-repo.key + nms_setup: install + nms_version: 2.16.0* + nms_user_name: admin + nms_user_passwd: default + nms_modules: + - name: sm + collections: + - nginxinc.nginx_core + roles: + - nginxinc.nginx_management_suite + ``` + +5. Run the playbook: + + ```shell + ansible-playbook -i nms-playbook.yml + ``` + +{{< see-also >}} For a comprehensive list of configuration options, view the [default `main.yaml` file](https://github.com/nginxinc/ansible-role-nginx-management-suite/blob/main/defaults/main.yml) on GitHub. 
{{< /see-also >}} + +--- + +## Getting support + +If you need help or have questions, you can request support from the [NGINX Instance Manager Ansible Role Project](https://github.com/nginxinc/ansible-role-nginx-management-suite/blob/main/SUPPORT.md) on GitHub. \ No newline at end of file diff --git a/content/nim/deploy/infrastructure-as-code/overview.md b/content/nim/deploy/infrastructure-as-code/overview.md new file mode 100644 index 000000000..986d970c1 --- /dev/null +++ b/content/nim/deploy/infrastructure-as-code/overview.md @@ -0,0 +1,51 @@ +--- +docs: DOCS-1249 +doctypes: +- task +tags: +- docs +title: Overview +toc: true +weight: 100 +--- + +{{< include "/nim/decoupling/note-legacy-nms-references.md" >}} + +Use our user-friendly Ansible role to easily install NGINX Instance Manager. The role also installs NGINX (OSS or Plus) and [ClickHouse](https://clickhouse.com), both of which are required for NGINX Instance Manager. Simplify your infrastructure management with our innovative Infrastructure as Code project, which enables you to set up the control plane and data plane as a unified solution. + +## NGINX Instance Manager Ansible role + +{{< call-out "tip" "Open-Source Project on GitHub" >}} +The steps in this guide refer to the Ansible NGINX Instance Manager Role project on GitHub. +{{}} + +To get started, you’ll need: + +- An NGINX repository certificate and key. For instructions on how to download them, click [here]({{< relref "/nim/deploy/vm-bare-metal/install.md#download-cert-key" >}}). +- A host capable of running Ansible. + +
    + + For more information, see [Install with Ansible]({{< relref "./configuration.md" >}}). + +--- + +## NGINX Instance Manager Infrastructure as Code + +{{< call-out "tip" "Open-Source Project on GitHub" >}} +The steps in this guide refer to the NGINX Instance Manager Infrastructure as Code project on GitHub. +{{< / call-out >}} + +The Infrastructure as Code (IaC) project makes it easy to set up the control plane and data plane together as a single solution. As of now, we offer full coverage for Amazon Web Services (AWS), with more to come. + +The IaC project requires the following: + +- An NGINX repository certificate and key. For instructions on how to download them, click [here]({{< relref "/nim/deploy/vm-bare-metal/install.md#download-cert-key" >}}). +- A trial or paid subscription for NGINX Instance Manager. You can [sign up for NGINX Instance Manager at MyF5](https://account.f5.com/myf5). +- A host capable of running Packer, Ansible, and Terraform. + +{{< img src="img/iac/iac-process.png" caption="Figure 1. NGINX Instance Manager's IaC build and deployment process" alt="Diagram showing the build and deployment process for infrastructure using Ansible and Packer for build and publish, and Terraform for deployment. Supported platforms include Azure, AWS, Google Cloud, and VMware.">}} + +
    + +For more information, see [Build and deploy images]({{< relref "./build-and-deploy.md" >}}). diff --git a/content/nim/deploy/kubernetes/_index.md b/content/nim/deploy/kubernetes/_index.md new file mode 100644 index 000000000..2d77b1067 --- /dev/null +++ b/content/nim/deploy/kubernetes/_index.md @@ -0,0 +1,6 @@ +--- +title: Kubernetes +weight: 200 +url: /nginx-instance-manager/deploy/kubernetes/ +--- + diff --git a/content/nim/deploy/kubernetes/deploy-using-helm.md b/content/nim/deploy/kubernetes/deploy-using-helm.md new file mode 100644 index 000000000..e72c317b4 --- /dev/null +++ b/content/nim/deploy/kubernetes/deploy-using-helm.md @@ -0,0 +1,348 @@ +--- +docs: DOCS-1651 +title: "Deploy using Helm" +toc: true +weight: 100 +doctypes: +- task +tags: +- docs +--- + +## Overview + +{{< include "/nim/decoupling/note-legacy-nms-references.md" >}} + +This guide provides a step-by-step tutorial on how to set up F5 NGINX Instance Manager on a Kubernetes cluster using Helm. You'll learn how to download and use Docker images and customize your deployment. + +### About Helm + +Helm charts are pre-configured packages of Kubernetes resources deployed with a single command. They let you define, install, and upgrade Kubernetes applications easily. + +Helm charts consist of files that describe a group of related Kubernetes resources, like deployments, services, and ingress. They also allow you to manage dependencies between applications, making it easier to deploy multi-tier or complex applications. + +{{< call-out "important" "Supportability considerations" >}} NGINX Instance Manager **does not** support [OpenShift](https://www.redhat.com/en/technologies/cloud-computing/openshift). For better compatibility, use [NGINX Ingress Controller](https://docs.nginx.com/nginx-ingress-controller/). 
{{< /call-out >}} + +--- + +## Before you begin + +To deploy NGINX Instance Manager using a Helm chart, you need: + +{{< bootstrap-table "table table-striped table-bordered" >}} +| Requirements | Notes | +| ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Docker 20.10 or later (linux/amd64) | [Docker documentation](https://docs.docker.com/get-docker) | +| Kubernetes 1.21.3 or later (linux/amd64) | Ensure your client can [access the Kubernetes API server](https://kubernetes.io/docs/concepts/security/controlling-access/). The Helm chart will enable persistent storage using the default storage class in your Kubernetes cluster. More info is available in [Dynamic Volume Provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/). | +| kubectl 1.21.3 or later | [kubectl documentation](https://kubernetes.io/docs/tasks/tools/#kubectl) | +| Helm 3.10.0 or later | [Helm installation guide](https://helm.sh/docs/intro/install/) | +| OpenSSL 1.1.1 or later | [OpenSSL source](https://www.openssl.org/source/) | +| `tar` 1.20 or later | The `tar` tool is usually installed by default. Check your version with `tar --version`. If `tar` is missing or outdated, install it from your distribution’s package manager (like YUM for CentOS/RHEL or APT for Debian/Ubuntu). 
| + +{{< /bootstrap-table >}} + + + +--- + +## Get the NGINX Instance Manager images + +### Using Docker + +#### Configure Docker to access the NGINX Instance Manager public registry + +{{< include "nim/docker/docker-registry-login.md" >}} + +#### Pull the NGINX Instance Manager images +You can now pull the necessary images for NGINX Instance Manager from the private registry at `private-registry.nginx.com`. + +Replace `` with the specific version you want to use. + +{{< note >}} The `latest` tag is not supported. {{< /note >}} + +```shell +docker pull private-registry.nginx.com/nms/apigw: +docker pull private-registry.nginx.com/nms/core: +docker pull private-registry.nginx.com/nms/dpm: +docker pull private-registry.nginx.com/nms/ingestion: +docker pull private-registry.nginx.com/nms/integrations: +docker pull private-registry.nginx.com/nms/utility: +``` + +If needed, you can push these images to your own private registry. + +#### Push images to your private registry + +After pulling the images, tag them and upload them to your private registry. + +1. Log in to your private registry: + + ```shell + docker login + ``` + +2. Tag and push each image. 
Replace `` with your registry’s path and `` with the version you’re using (for example, `2.17.0`): + + - For the `apigw` image: + + ```shell + docker tag private-registry.nginx.com/nms/apigw: /nms/apigw: + docker push /nms/apigw: + ``` + + - For the `core` image: + + ```shell + docker tag private-registry.nginx.com/nms/core: /nms/core: + docker push /nms/core: + ``` + + - For the `dpm` image: + + ```shell + docker tag private-registry.nginx.com/nms/dpm: /nms/dpm: + docker push /nms/dpm: + ``` + + - For the `ingestion` image: + + ```shell + docker tag private-registry.nginx.com/nms/ingestion: /nms/ingestion: + docker push /nms/ingestion: + ``` + + - For the `integrations` image: + + ```shell + docker tag private-registry.nginx.com/nms/integrations: /nms/integrations: + docker push /nms/integrations: + ``` + + - For the `utility` image: + + ```shell + docker tag private-registry.nginx.com/nms/utility: /nms/utility: + docker push /nms/utility: + ``` + +### Using Helm with a JWT token + +If you don't need a private registry, you can use a JWT token as a Docker configuration secret with Helm charts. + +Create a Docker registry secret on the cluster, using the JWT token as the username and `none` as the password. The Docker server is `private-registry.nginx.com`. + +{{< note >}} Make sure there are no extra characters or spaces when copying the JWT token. They can invalidate the token and cause 401 errors during authentication. {{< /note >}} + +```shell +kubectl create secret docker-registry regcred \ +--docker-server=private-registry.nginx.com \ +--docker-username= \ +--docker-password=none +``` + +{{< warning >}} + +You might see a warning about `--password` being insecure. + +This can be ignored (since no password is used), but if others have access to this system, delete the JWT token and clear your shell history after deployment. 
+ +{{< /warning >}} + +To confirm the secret is created: + +```shell +kubectl get secret regcred --output=yaml +``` + +You can now use this secret for Helm deployments and point the charts to the public registry. + +--- + +## Add the Helm repository + +{{< note >}} You need Helm 3.10.0 or later for these steps. {{< /note >}} + +Run these commands to install the NGINX Instance Manager chart from the Helm repository: + +```shell +helm repo add nginx-stable https://helm.nginx.com/stable +helm repo update +``` + +The first command adds the `nginx-stable` repository to your local Helm repo list. The second updates the list to ensure you have the latest versions of the charts. + +--- + +## Create a Helm deployment values.yaml file + +The `values.yaml` file customizes the Helm chart installation without editing the chart itself. You can specify image repositories, environment variables, resource requests, and more. + +1. Create a `values.yaml` file similar to this example: + + - Replace `` with your private Docker registry and port (if needed). + - In the `imagePullSecrets` section, add the credentials for your private Docker registry. + + {{< see-also >}} For more on creating a secret, see Kubernetes [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). {{}} + + ```yaml + nms-hybrid: + imagePullSecrets: + - name: regcred + apigw: + image: + repository: /nms-apigw + tag: + core: + image: + repository: /nms-core + tag: + dpm: + image: + repository: /nms-dpm + tag: + ingestion: + image: + repository: /nms-ingestion + tag: + integrations: + image: + repository: /nms-integrations + tag: + utility: + image: + repository: /nms-utility + tag: + ``` + + This file specifies the Docker images for `apigw`, `core`, `dpm`, `ingestion`, `integrations`, and `utility`. It also indicates that a secret called `regcred` should be used for pulling images. + +1. Save and close the `values.yaml` file. 
+ +--- + +## Manage network policies + +To apply network policies for NGINX Instance Manager, ensure Kubernetes has a [network plugin](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) installed before the Helm chart installation. + +By default, the following network policies will be created in the release namespace: + +```shell +kubectl get netpol -n nms +``` + +```text +NAME POD-SELECTOR AGE +apigw app.kubernetes.io/name=apigw 4m47s +clickhouse app.kubernetes.io/name=clickhouse 4m47s +core app.kubernetes.io/name=core 4m47s +dpm app.kubernetes.io/name=dpm 4m47s +ingestion app.kubernetes.io/name=ingestion 4m47s +integrations app.kubernetes.io/name=integrations 4m47s +utility app.kubernetes.io/name=integrations 4m47s +``` + +To disable network policies, update the `values.yaml` file: + +```yaml +networkPolicies: + # Set this to true to enable network policies for NGINX Instance Manager. + enabled: false +``` + +--- + +## Install the chart + +Run the `helm install` command to deploy NGINX Instance Manager: + +1. Replace `` with the path to your `values.yaml` file. +1. Replace `YourPassword123#` with a secure password (containing a mix of uppercase, lowercase letters, numbers, and special characters). + + {{< important >}} Remember to save the password for future use. Only the encrypted password is stored, and there's no way to recover or reset it if lost. {{< /important >}} + +(Optional) Replace `` with the desired chart version. If omitted, the latest version will be installed. 
+ +```shell +helm install -n nms \ +--set nms-hybrid.adminPasswordHash=$(openssl passwd -6 'YourPassword123#') \ +nms nginx-stable/nms \ +--create-namespace \ +-f \ +[--version ] \ +--wait +``` + +To help you choose the right NGINX Instance Manager chart version, see the table in: + +{{< include "nim/kubernetes/nms-chart-supported-module-versions.md" >}} + +--- + +## Validate the deployment + +Check the status of the deployment: + +```shell +helm -n nms status nms +``` + +The status should show `STATUS: deployed` if successful. + +--- + +## Access the web interface + +{{< include "nim/kubernetes/access-webui-helm.md" >}} + +--- + +## Add a license + +A valid license is required to use all NGINX Instance Manager features. + +For instructions on downloading and applying a license, see [Add a License]({{< relref "/nim/admin-guide/license/add-license.md" >}}). + +--- + +## Upgrade NGINX Instance Manager + +To upgrade: + +1. [Update the Helm repository list](#add-helm-repository). +1. [Adjust your `values.yaml` file](#create-a-helm-deployment-values.yaml-file) if needed. +1. To upgrade the NGINX instance deployment, run the following command. This command updates the `nms` deployment with a new version from the `nginx-stable/nms` repository. It also hashes the provided password and uses the `values.yaml` file at the path you specify. + + ```bash + helm upgrade -n nms \ + --set nms-hybrid.adminPasswordHash=$(openssl passwd -6 'YourPassword123#') \ + nms nginx-stable/nms \ + -f \ + [--version ] \ + --wait + ``` + + - Replace `` with the path to the `values.yaml` file you created]({{< relref "/nim/deploy/kubernetes/deploy-using-helm.md#configure-chart" >}}). + - Replace `YourPassword123#` with a secure password that includes uppercase and lowercase letters, numbers, and special characters. + + {{}} Save this password for future use. Only the encrypted password is stored in Kubernetes, and you can’t recover or reset it later. 
{{}} + - (Optional) Replace with the desired version number. If you don’t specify a version, the latest version will be installed. + + +--- + +## Uninstall NGINX Instance Manager {#helm-uninstall-nim} + +To uninstall: + +```bash +helm uninstall --namespace nms nms +``` + +This deletes the `nms` application and all associated Kubernetes resources. + +--- + +## Troubleshooting + +For instructions on creating a support package to share with NGINX Customer Support, see [Create a Support Package from a Helm Installation]({{< relref "/nms/support/k8s-support-package.md" >}}). + diff --git a/content/nim/deploy/kubernetes/frequently-used-helm-configs.md b/content/nim/deploy/kubernetes/frequently-used-helm-configs.md new file mode 100644 index 000000000..44147b2a4 --- /dev/null +++ b/content/nim/deploy/kubernetes/frequently-used-helm-configs.md @@ -0,0 +1,215 @@ +--- +docs: DOCS-1275 +doctypes: +- task +tags: +- docs +title: Frequently used Helm configurations +toc: true +weight: 400 +--- + +## Overview + +{{< include "/nim/decoupling/note-legacy-nms-references.md" >}} + +This guide provides frequently used configurations for NGINX Instance Manager. To apply any of these configurations, modify the `values.yaml` file accordingly. + +Refer to the [configurable Helm settings]({{< relref "/nim/deploy/kubernetes/helm-config-settings.md" >}}) guide for a complete list of configurable parameters and default values used by NGINX Instance Manager and its modules when installing from a Helm chart. + +--- + +## Use your own ClickHouse installation + +NGINX Instance Manager requires a [ClickHouse](https://clickhouse.com) database server for storing metrics data. ClickHouse is an open-source, column-based, high-performance analytics database that allows real-time queries on large amounts of data. + +By default, the Helm chart installs ClickHouse, which is enabled in the `values.yaml` file by setting `nms-hybrid.nmsClickhouse.enabled` to `true`. 
+ +To use your own ClickHouse installation, follow these steps: + +1. Set `nms-hybrid.nmsClickhouse.enabled` to `false`. +2. Add values for `nms-hybrid.externalClickhouse.address`, `.user`, and `.password` that match your ClickHouse installation. + + {{< note >}}The `nms-hybrid.externalClickhouse` field is required when `nms-hybrid.nmsClickhouse` is disabled.{{}} + +--- + +## Use your own certificates + +{{< production >}} +This section is recommended for production deployments. +{{< /production >}} + +NGINX Instance Manager generates a certificate authority and self-signs its certificates by default. + +To use your own certificates, follow these steps: + +1. Open `values.yaml` for editing. +2. Add the name of a Kubernetes secret to `nms-hybrid.apigw.tlsSecret`. The following fields are required: + + - `tls.crt` + - `tls.key` + - `ca.pem` + + **Example Kubernetes secret:** + + ```yaml + apiVersion: v1 + kind: Secret + metadata: + name: apigw-tls + type: kubernetes.io/tls + data: + tls.crt: | + + tls.key: | + + ca.pem: | + + ``` + +--- + +## Adjust storage settings or disable persistent storage + +You can review and adjust the deployment's default resource and storage settings by editing the `values.yaml` file in the Helm package you downloaded. Adjust the values to meet your data needs. + +Persistent volumes are enabled by default for the ClickHouse database server and the `nms-hybrid.core` and `nms-hybrid.dpm` services. To disable persistent storage for a configuration, set `nms-hybrid.persistence.enabled` to `false`. + +--- + +## Use NGINX Plus for API Gateway + +To use NGINX Plus for the API Gateway, follow these steps: + +1. Build your own Docker image for the NGINX Plus API Gateway by providing your `nginx-repo.crt` and `nginx-repo.key`. Download the certificate and key from the [MyF5 website](https://my.f5.com) and add them to your build context. + + Use the following example Dockerfile to build the image. 
In this example, we use `apigw:` as the base image, which you obtained when you [downloaded the Helm package]({{< relref "/nim/deploy/kubernetes/deploy-using-helm.md#download-helm-package" >}}). + + **Example Dockerfile:** + + ```dockerfile + # syntax=docker/dockerfile:1 + + # NGINX PLUS API-GW + # NOTE: + # NMS does not publish this Docker image and these are only instructions on how to build API-GW with NGINX-PLUS. + # This Docker build should be performed by the customer using their own nginx-repo.crt and nginx-repo.key. + # API-GW with NGINX-PLUS is needed to enable OIDC. + + # Download NMS API gateway Docker image from MyF5 Downloads, https://docs.nginx.com/nginx-management-suite/installation/helm-chart/ + # Replace "apigw:" with a known release tag. + # For example: apigw:2.6.0 + + FROM apigw: as apigw-plus + + ARG REPO_PATH=. + + # Define NGINX versions for NGINX Plus and NGINX Plus modules + # Uncomment this block and the versioned nginxPackages in the main RUN + # instruction to install a specific release + # ENV NGINX_VERSION 21 + # ENV NJS_VERSION 0.3.9 + # ENV PKG_RELEASE 1 + + # Remove any previous version of NGINX + RUN apk del nginx* + + # Download certificate and key from the customer portal (https://cs.nginx.com) + # and copy them to the build context + COPY ${REPO_PATH}/nginx-repo.crt /etc/apk/cert.pem + COPY ${REPO_PATH}/nginx-repo.key /etc/apk/cert.key + + RUN set -x \ + # Install the latest release of NGINX Plus and/or NGINX Plus modules + # Uncomment individual modules if necessary + # Use versioned packages over defaults to specify a release + && nginxPackages=" \ + nginx-plus \ + # nginx-plus=${NGINX_VERSION}-${PKG_RELEASE} \ + nginx-plus-module-njs \ + # nginx-plus-module-lua \ + # nginx-plus-module-xslt \ + # nginx-plus-module-xslt=${NGINX_VERSION}-${PKG_RELEASE} \ + # nginx-plus-module-geoip \ + # nginx-plus-module-geoip=${NGINX_VERSION}-${PKG_RELEASE} \ + # nginx-plus-module-image-filter \ + # 
nginx-plus-module-image-filter=${NGINX_VERSION}-${PKG_RELEASE} \ + # nginx-plus-module-perl \ + # nginx-plus-module-perl=${NGINX_VERSION}-${PKG_RELEASE} \ + # nginx-plus-module-njs=${NGINX_VERSION}.${NJS_VERSION}-${PKG_RELEASE} \ + " \ + KEY_SHA512="de7031fdac1354096d3388d6f711a508328ce66c168967ee0658c294226d6e7a161ce7f2628d577d56f8b63ff6892cc576af6f7ef2a6aa2e17c62ff7b6bf0d98 *stdin" \ + && apk add --no-cache --virtual .cert-deps \ + openssl vim \ + && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \ + && if [ "$(openssl rsa -pubin -in /tmp/nginx_signing.rsa.pub -text -noout | openssl sha512 -r)" = "$KEY_SHA512" ]; then \ + echo "key verification succeeded!"; \ + mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \ + else \ + echo "key verification failed!"; \ + exit 1; \ + fi \ + && apk del .cert-deps \ + && apk add -X "https://plus-pkgs.nginx.com/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \ + && if [ -n "/etc/apk/keys/nginx_signing.rsa.pub" ]; then rm -f /etc/apk/keys/nginx_signing.rsa.pub; fi \ + && if [ -n "/etc/apk/cert.key" && -n "/etc/apk/cert.pem"]; then rm -f /etc/apk/cert.key /etc/apk/cert.pem; fi \ + # Bring in gettext so we can get `envsubst`, then throw + # the rest away. To do this, we need to install `gettext` + # then move `envsubst` out of the way so `gettext` can + # be deleted completely, then move `envsubst` back. 
+ && apk add --no-cache --virtual .gettext gettext \ + && mv /usr/bin/envsubst /tmp/ \ + \ + && runDeps="$( \ + scanelf --needed --nobanner /tmp/envsubst \ + | awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \ + | sort -u \ + | xargs -r apk info --installed \ + | sort -u \ + )" \ + && apk add --no-cache $runDeps \ + && apk del .gettext \ + && mv /tmp/envsubst /usr/local/bin/ \ + # Bring in tzdata so users can set timezones through environment variables + && apk add --no-cache tzdata \ + # Bring in curl and ca-certificates to make registering on DNS SD easier + && apk add --no-cache curl ca-certificates \ + # Forward request and error logs to Docker log collector + && ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log + + CMD ["nginx", "-g", "daemon off;"] + + # vim:syntax=Dockerfile + ``` + +2. Tag the Docker image: + + ```shell + docker tag apigw-plus /nms-apigw-plus: + ``` + + - Replace `` with your private Docker registry. + - Replace `` with the version tag. + +3. Push the image to your private registry: + + ```shell + docker push /nms-apigw-plus: + ``` + +4. Edit the `values.yaml` file to configure the Helm chart to pull the `apigw` image from your private Docker registry: + + ```yaml + # values.yaml + nms-hybrid: + imagePullSecrets: + - name: regcred + apigw: + image: + repository: /nms-apigw-plus + tag: + ``` + +This configuration specifies the name of the secret that should be used for pulling images (`regcred`) and configures the `apigw` image to be pulled from your private Docker registry. 
diff --git a/content/nim/deploy/kubernetes/helm-config-settings.md b/content/nim/deploy/kubernetes/helm-config-settings.md new file mode 100644 index 000000000..4a777b591 --- /dev/null +++ b/content/nim/deploy/kubernetes/helm-config-settings.md @@ -0,0 +1,131 @@ +--- +docs: DOCS-1112 +doctypes: +- reference +tags: +- docs +title: Configurable Helm settings +toc: true +weight: 300 +--- + +{{< include "/nim/decoupling/note-legacy-nms-references.md" >}} + +This reference guide lists the configurable Helm chart parameters and default settings for NGINX Instance Manager. + +## NGINX Instance Manager Helm chart settings {#helm-settings} + +The following table lists the configurable parameters and default values for NGINX Instance Manager when installing from a Helm chart. + +To modify a configuration for an existing release, run the `helm upgrade` command and use `-f `, where `` is the path to a values file with your desired configuration. + +{{}} + +| Parameter | Description | Default | +|:-----------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------| +| `nms-hybrid.adminPasswordHash` | The hashed value of the password for the admin user.
    To generate the hash using `openssl`, run a command like: `openssl passwd -1 "YourPassword123#"` | N/A | +| `nms-hybrid.nmsClickhouse.enabled` | Enable this if external ClickHouse is not used. | `true` | +| `nms-hybrid.nmsClickhouse.fullnameOverride` | Modify the name of ClickHouse resources. | `clickhouse` | +| `nms-hybrid.nmsClickhouse.image.repository` | Repository name and path for the public ClickHouse image. | `clickhouse/clickhouse-server` | +| `nms-hybrid.nmsClickhouse.image.tag` | Tag used for pulling images from the registry. | `21.3.20.1-alpine` | +| `nms-hybrid.nmsClickhouse.image.pullPolicy` | Image pull policy. | `IfNotPresent` | +| `nms-hybrid.nmsClickhouse.user` | Username to connect to the ClickHouse server. | N/A | +| `nms-hybrid.nmsClickhouse.password` | Password for the ClickHouse server. | N/A | +| `nms-hybrid.nmsClickhouse.service.name` | ClickHouse service name. | `clickhouse` | +| `nms-hybrid.nmsClickhouse.service.rpcPort` | ClickHouse service port. | `9000` | +| `nms-hybrid.nmsClickhouse.resources.requests.cpu` | Minimum required CPU to run the ClickHouse server. | `500m` | +| `nms-hybrid.nmsClickhouse.resources.requests.memory` | Minimum required memory to run the ClickHouse server. | `1Gi` | +| `nms-hybrid.nmsClickhouse.persistence.enabled` | Use PVCs to persist ClickHouse data. | `true` | +| `nms-hybrid.nmsClickhouse.persistence.existingClaim` | Name of an existing Persistent Volume Claim (PVC) to use for ClickHouse persistence. | N/A | +| `nms-hybrid.nmsClickhouse.persistence.storageClass` | Storage class to use for creating a ClickHouse PVC. | | +| `nms-hybrid.nmsClickhouse.persistence.volumeName` | Name to use for a ClickHouse PVC volume. | | +| `nms-hybrid.nmsClickhouse.persistence.accessMode` | PVC access mode for ClickHouse. | `ReadWriteOnce` | +| `nms-hybrid.nmsClickhouse.persistence.size` | PVC size for ClickHouse. | `1G` | +| `nms-hybrid.nmsClickhouse.tolerations` | List of Kubernetes tolerations, if any. 
| See [Kubernetes taints and tolerations](#kubernetes-taints-and-tolerations) | +| `nms-hybrid.externalClickhouse.address` | Address of the external ClickHouse service. | | +| `nms-hybrid.externalClickhouse.user` | User of the external ClickHouse service. | | +| `nms-hybrid.externalClickhouse.password` | Password of the external ClickHouse service. | | +| `nms-hybrid.serviceAccount.annotations` | Set custom annotations for the service account used by NGINX Instance Manager. | `{}` | +| `nms-hybrid.apigw.name` | Name used for API Gateway resources. | `apigw` | +| `nms-hybrid.apigw.tlsSecret` | By default, this Helm chart creates its own Certificate Authority (CA) to self-sign HTTPS server cert key pairs. These are not managed by NGINX Instance Manager. You can bring your own NGINX API Gateway certificates for hosting the HTTPS server by setting `tlsSecret` to an existing Kubernetes secret name in the targeted namespace. The secret should include `tls.crt`, `tls.key`, and `ca.pem` in the data object. Using a self-provisioned "tlsSecret" is recommended for production.

    Refer to the "Use your own certificates" section in [Frequently used Helm configurations]({{< relref "/nim/deploy/kubernetes/frequently-used-helm-configs.md#use-your-own-certificates" >}}) for an example. | | +| `nms-hybrid.apigw.image.repository` | Repository name and path for the `apigw` image. | `apigw` | +| `nms-hybrid.apigw.image.tag` | Tag used for pulling images from the registry. | `latest` | +| `nms-hybrid.apigw.image.pullPolicy` | Image pull policy. | `IfNotPresent` | +| `nms-hybrid.apigw.container.port.https` | Container HTTPS port. | `443` | +| `nms-hybrid.apigw.service.name` | Service name. | `apigw` | +| `nms-hybrid.apigw.service.type` | Service type (options: `ClusterIp`, `LoadBalancer`, `NodePort`). | `ClusterIp` | +| `nms-hybrid.apigw.service.httpsPort` | Service HTTPS port. | `443` | +| `nms-hybrid.apigw.resources.requests.cpu` | Minimum required CPU to run `apigw`. | `250m` | +| `nms-hybrid.apigw.resources.requests.memory` | Minimum required memory to run `apigw`. | `256Mi` | +| `nms-hybrid.apigw.tolerations` | List of Kubernetes tolerations, if any. | See [Kubernetes taints and tolerations](#kubernetes-taints-and-tolerations) | +| `nms-hybrid.core.name` | Name used for core resources. | `core` | +| `nms-hybrid.core.image.repository` | Repository name and path for the `core` image. | `core` | +| `nms-hybrid.core.image.tag` | Tag used for pulling images from the registry. | `latest` | +| `nms-hybrid.core.image.pullPolicy` | Image pull policy. | `IfNotPresent` | +| `nms-hybrid.core.container.port.http` | Container HTTP port. | `8033` | +| `nms-hybrid.core.container.port.db` | Container database port. | `7891` | +| `nms-hybrid.core.container.port.grpc` | Container gRPC port. | `8038` | +| `nms-hybrid.core.service.httpPort` | Service HTTP port. | `8033` | +| `nms-hybrid.core.service.grpcPort` | Service gRPC port. | `8038` | +| `nms-hybrid.core.resources.requests.cpu` | Minimum required CPU to run `core`. 
| `500m` | +| `nms-hybrid.core.resources.requests.memory` | Minimum required memory to run `core`. | `512Mi` | +| `nms-hybrid.core.persistence.enabled` | Enable persistence for `core` service. | `true` | +| `nms-hybrid.core.persistence.claims` | An array of persistent volume claims for Dqlite and secrets. Can be modified to use an existing PVC. | See [Dqlite](#nim-dqlite-storage-configuration) and [Secrets](#nim-secrets-storage-configuration) | +| `nms-hybrid.core.persistence.storageClass` | Storage class to use for creating a `core` PVC. | | +| `nms-hybrid.core.persistence.volumeName` | Name to use for a `core` PVC volume. | | +| `nms-hybrid.core.tolerations` | List of Kubernetes tolerations, if any. | See [Kubernetes taints and tolerations](#kubernetes-taints-and-tolerations) | +| `nms-hybrid.dpm.name` | Name used for `dpm`. | `dpm` | +| `nms-hybrid.dpm.image.repository` | Repository name and path for the `dpm` image. | `dpm` | +| `nms-hybrid.dpm.image.tag` | Tag used for pulling images from the registry. | `latest` | +| `nms-hybrid.dpm.image.pullPolicy` | Image pull policy. | `IfNotPresent` | +| `nms-hybrid.dpm.container.port.http` | Container HTTP port. | `8034` | +| `nms-hybrid.dpm.container.port.nats` | Container NATS port. | `9100` | +| `nms-hybrid.dpm.container.port.db` | Container database port. | `7890` | +| `nms-hybrid.dpm.container.port.grpc` | Container gRPC port. | `8036` | + +{{
    }} + +## NGINX Instance Manager dqlite storage configuration + +```yaml + - name: dqlite + existingClaim: + size: 500Mi + accessMode: ReadWriteOnce +``` + +## NGINX Instance Manager secrets storage configuration + +```yaml + - name: secrets + existingClaim: + size: 128Mi + accessMode: ReadWriteOnce +``` + +## NGINX Instance Manager NATS storage configuration + +```yaml + - name: nats-streaming + existingClaim: + size: 1Gi + accessMode: ReadWriteOnce +``` + +## Kubernetes taints and tolerations + +The following example snippet shows a toleration for an NGINX Instance Manager API Gateway deployment. In this example, Kubernetes will tolerate the "NoExecute" effect for 60 seconds before evicting the pod from the tainted node. + +```yaml +tolerations: + - key: "node.kubernetes.io/unreachable" + operator: "Exists" + effect: "NoExecute" + tolerationSeconds: 60 + - key: "node.kubernetes.io/network-unavailable" + operator: "Exists" + effect: "NoExecute" + tolerationSeconds: 60 +``` + +For more information, refer to the official Kubernetes [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) documentation. diff --git a/content/nim/deploy/uninstall-guide.md b/content/nim/deploy/uninstall-guide.md new file mode 100644 index 000000000..0f57dd7b9 --- /dev/null +++ b/content/nim/deploy/uninstall-guide.md @@ -0,0 +1,64 @@ +--- +description: This guide explains how to uninstall F5 NGINX Management Suite, including NGINX + Instance Manager. +docs: DOCS-804 +doctypes: task +title: Uninstall Guide +toc: true +weight: 1000 +draft: true +--- + +## F5 NGINX Management Suite {#uninstall-nms} + +{{}} +{{%tab name="CentOS, RHEL, and RPM-Based"%}} + +To uninstall NGINX Management Suite and all of its modules, complete the following steps: + +1. To uninstall NGINX Management Suite, run the following command: + + ```bash + yum remove nms-* + ``` + +2. 
Stop the ClickHouse service: + + ```bash + sudo systemctl stop clickhouse-server + ``` + +3. To uninstall ClickHouse, run the following command: + + ```bash + yum remove clickhouse-server + ``` + +{{%/tab%}} +{{%tab name="Debian, Ubuntu, and Deb-Based"%}} + +To uninstall NGINX Management suite and all of its modules, complete the following steps: + +1. To uninstall NGINX Management Suite, run the following command: + + ```bash + sudo apt-get remove nms-* + ``` + +2. Stop the ClickHouse service: + + ```bash + sudo systemctl stop clickhouse-server + ``` + +3. To uninstall ClickHouse, run the following command: + + ```bash + sudo apt-get remove clickhouse-server + ``` + + > **Note:** The `apt-get remove ` command will remove the package from your system, while keeping the associated configuration files for possible future use. If you want to completely remove the package and all of its configuration files, you should use `apt-get purge `. + + +{{%/tab%}} +{{}} diff --git a/content/nim/deploy/upgrade-guide.md b/content/nim/deploy/upgrade-guide.md new file mode 100644 index 000000000..4ded52430 --- /dev/null +++ b/content/nim/deploy/upgrade-guide.md @@ -0,0 +1,59 @@ +--- +description: This guide explains how to upgrade NGINX Instance Manager, NGINX Agent, and NGINX Plus. +docs: DOCS-920 +doctypes: +- tutorial +tags: +- docs +title: Upgrade Guide +toc: true +weight: 600 +draft: true +--- + +## Overview + +This guide explains how to upgrade NGINX Instance Manager, NGINX Agent, and NGINX Plus to their latest versions. It includes steps to back up your current configuration, run the upgrade script, and verify the results. You’ll also find instructions for troubleshooting any potential upgrade issues. 
+ +{{< call-out "tip" "" "">}}Make sure to read the NGINX Instance Manager release notes for important information before upgrading.{{}} + +--- + +## Before you upgrade {#pre-upgrade-steps} + +- Review release notes +- Back up your current configuration + + + +## Upgrade NGINX Instance Manager + +### Instance Manager + +- [Upgrade Instance Manager on a virtual machine or bare metal]({{< relref "/nim/deploy/vm-bare-metal/install.md#upgrade-nim" >}}) +- [Upgrade Instance Manager from a Helm Chart]({{< relref "/nim/deploy/kubernetes/deploy-using-helm.md#helm-upgrade-nim" >}}) +- [Upgrade Instance Manager in an offline environment]({{< relref "/nim/disconnected/offline-install-guide.md#upgrade-nim-offline" >}}) + +### Security Monitoring + +- [Upgrade Security Monitoring on a virtual machine or bare metal]({{< relref "/nim/monitoring/security-monitoring/deploy/install-security-monitoring.md#upgrade-security-monitoring" >}}) + +--- + +## Upgrade NGINX Agent {#upgrade-nginx-agent} + +To upgrade NGINX Agent, refer to the [NGINX Agent Installation and Upgrade Guide](https://docs.nginx.com/nginx-agent/installation-upgrade/). + +--- + +## Upgrade NGINX Plus + +For instructions on upgrading NGINX Plus, see the [NGINX Plus Installation and Upgrade Guide](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-plus/). + +--- + +## Troubleshooting + +If you encounter problems with the upgrade, you can [create a support package]({{< relref "/nms/support/support-package.md" >}}). The support package script compiles system and service information into a tar archive for troubleshooting. If you contact [NGINX Customer Support]({{< relref "/nms/support/contact-support.md" >}}), they may ask you to provide the support package file. + +The [AskF5 knowledge base](https://support.f5.com/csp/home) is a helpful resource for articles related to upgrade issues and solutions.
\ No newline at end of file diff --git a/content/nim/deploy/vm-bare-metal/_index.md b/content/nim/deploy/vm-bare-metal/_index.md new file mode 100644 index 000000000..1a905a4f8 --- /dev/null +++ b/content/nim/deploy/vm-bare-metal/_index.md @@ -0,0 +1,6 @@ +--- +title: Virtual Machine or Bare Metal +weight: 400 +url: /nginx-instance-manager/deploy/vm-bare-metal/ +--- + diff --git a/content/nim/deploy/vm-bare-metal/install-nim-deprecated.md b/content/nim/deploy/vm-bare-metal/install-nim-deprecated.md new file mode 100644 index 000000000..f2033ce6c --- /dev/null +++ b/content/nim/deploy/vm-bare-metal/install-nim-deprecated.md @@ -0,0 +1,319 @@ +--- +description: +docs: DOCS-1211 +doctypes: +- tutorial +tags: +- docs +title: Manually install on a virtual machine or bare metal (deprecated) +toc: true +weight: 10 +noindex: true +--- + + + +## Overview + +Follow the steps in this guide to install or upgrade NGINX Instance Manager. + +{{}} +This document outlines manual steps that have been replaced by a simplified script-based process. For most users, we recommend using the updated process documented [here]({{< relref "nim/deploy/vm-bare-metal/install.md" >}}).{{}} + +## Before You Begin + +### Security Considerations + +{{< include "installation/secure-installation.md" >}} + +### Requirements {#requirements} + +To install NGINX Instance Manager, you need the following: + +- A trial or paid subscription for NGINX Instance Manager. [Sign up for NGINX Instance Manager at MyF5](https://account.f5.com/myf5). +- A Linux instance to host the NGINX Instance Manager platform and modules +- [NGINX Plus or NGINX OSS](#install-nginx) installed on the instance hosting NGINX Instance Manager + +Allow external systems access by opening network firewalls. NGINX Instance Manager uses port `443` for both gRPC and API/web interfaces. + +--- + +## Download Certificate and Key {#download-cert-key} + +Follow these steps to download the certificate and private key for NGINX Instance Manager. 
You'll need these files when adding the official repository for installing NGINX Instance Manager. You can also use the certificate and key when installing NGINX Plus. + +1. On the host where you're installing NGINX Instance Manager, create the `/etc/ssl/nginx/` directory: + + ``` bash + sudo mkdir -p /etc/ssl/nginx + ``` + +2. Download the NGINX Instance Manager `.crt` and `.key` files from [MyF5](https://account.f5.com/myf5) or follow the download link in your trial activation email. + +3. Move and rename the `.crt` and `.key` files: + + ```bash + sudo mv /etc/ssl/nginx/nginx-repo.crt + sudo mv /etc/ssl/nginx/nginx-repo.key + ``` + + {{}}The downloaded filenames may vary depending on your subscription type. Modify the commands above accordingly to match the actual filenames.{{}} + +--- + +## Install NGINX {#install-nginx} + +Install NGINX Open Source or NGINX Plus on the host where you'll install NGINX Instance Manager. NGINX Instance Manager uses NGINX as a front-end proxy and for managing user access. + +- [Installing NGINX and NGINX Plus](https://docs.nginx.com/nginx/admin-guide/installing-nginx/) + + {{}}If you're installing NGINX Plus, you can use the `nginx-repo.key` and `nginx-repo.crt` that you added in the [previous section](#download-cert-key).{{}} + +
    + Supported NGINX versions + +{{< include "nim/tech-specs/supported-nginx-versions.md" >}} + +
    + +
    + Supported Linux distributions + +{{< include "nim/tech-specs/supported-distros.md" >}} + +
    + +{{}}Make sure to review the [Technical Specifications]({{< relref "tech-specs" >}}) guide for sizing requirements and other recommended specs.{{}} + +--- + +## Install ClickHouse {#install-clickhouse} + +{{}}NGINX Instance Manager requires ClickHouse 22.3.15.33 or later.{{}} + +NGINX Instance Manager uses [ClickHouse](https://clickhouse.com) to store metrics, events, and alerts, as well as configuration settings. + +Select the tab for your Linux distribution, then follow the instructions to install ClickHouse. + +{{}} + +{{%tab name="CentOS, RHEL, RPM-Based"%}} + +To install and enable ClickHouse CentOS, RHEL, and RPM-Based distributions, take the following steps: + +1. Set up the repository: + + ``` bash + sudo yum install -y yum-utils + sudo yum-config-manager --add-repo https://packages.clickhouse.com/rpm/clickhouse.repo + ``` + +1. Install the ClickHouse server and client: + + ```bash + sudo yum install -y clickhouse-server clickhouse-client + ``` + + > **IMPORTANT!** When installing ClickHouse, you have the option to specify a password or leave the password blank (the default is an empty string). If you choose to specify a password for ClickHouse, you must also edit the `/etc/nms/nms.conf` file after installing NGINX Instance Manager and enter your ClickHouse password; otherwise, NGINX Instance Manager won't start. + > + > For more information on customizing ClickHouse settings, refer to the [Configure ClickHouse]({{< relref "nim/system-configuration/configure-clickhouse.md" >}}) topic. + +1. Enable ClickHouse so that it starts automatically if the server is restarted: + + ```bash + sudo systemctl enable clickhouse-server + ``` + +1. Start the ClickHouse server: + + ```bash + sudo systemctl start clickhouse-server + ``` + +1. 
Verify ClickHouse is running: + + ```bash + sudo systemctl status clickhouse-server + ``` + +{{%/tab%}} + +{{%tab name="Debian, Ubuntu, Deb-Based"%}} + +To install and enable ClickHouse on Debian, Ubuntu, and Deb-Based distributions, take the following steps: + +1. Set up the repository: + + ```bash + sudo apt-get install -y apt-transport-https ca-certificates dirmngr + GNUPGHOME=$(mktemp -d) + sudo GNUPGHOME="$GNUPGHOME" gpg --no-default-keyring --keyring /usr/share/keyrings/clickhouse-keyring.gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 8919F6BD2B48D754 + sudo rm -r "$GNUPGHOME" + sudo chmod +r /usr/share/keyrings/clickhouse-keyring.gpg + + echo "deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb lts main" | sudo tee /etc/apt/sources.list.d/clickhouse.list + sudo apt-get update + ``` + +1. Install the ClickHouse server and client: + + ``` bash + sudo apt-get install -y clickhouse-server clickhouse-client + ``` + + > **IMPORTANT!** When installing ClickHouse, you have the option to specify a password or leave the password blank (the default is an empty string). If you choose to specify a password for ClickHouse, you must also edit the `/etc/nms/nms.conf` file after installing NGINX Instance Manager and enter your ClickHouse password; otherwise, NGINX Instance Manager won't start. + > + > For more information on customizing ClickHouse settings, refer to the [Configure ClickHouse]({{< relref "nim/system-configuration/configure-clickhouse.md" >}}) topic. + +1. Enable ClickHouse so that it starts automatically if the server is restarted: + + ```bash + sudo systemctl enable clickhouse-server + ``` + +1. Start the ClickHouse server: + + ``` bash + sudo systemctl start clickhouse-server + ``` + +1. 
Verify ClickHouse is running: + + ```bash + sudo systemctl status clickhouse-server + ``` + +{{%/tab%}} + +{{}} + +### ClickHouse Default Settings + +NGINX Instance Manager uses the following default values for ClickHouse: + +{{}}You can customize these settings. However, if you use custom settings, make sure to follow the [Configure ClickHouse]({{< relref "nim/system-configuration/configure-clickhouse.md" >}}) instructions to update the `nms.conf` file after you've installed NGINX Instance Manager; otherwise NGINX Instance Manager won't be able to connect to ClickHouse.{{}} + +{{< include "installation/clickhouse-defaults.md" >}} + +--- + +## (Optional) Install and Configure Vault {#install-vault} + +NGINX Instance Manager can use [Vault](https://www.vaultproject.io/) as a datastore for secrets. + +To install and enable Vault, take the following steps: + +- Follow Vault's instructions to [install Vault 1.8.8 or later](https://www.vaultproject.io/docs/install) for your distribution. +- Ensure you are running Vault in a [Production Hardened Environment](https://learn.hashicorp.com/tutorials/vault/production-hardening). +- After installing NGINX Instance Manager, follow the steps to [Configure Vault for Storing Secrets]({{< relref "nim/system-configuration/configure-vault.md" >}}). + +--- + +## Add NGINX Instance Manager Repository {#add-nms-repo} + +To install NGINX Instance Manager, you need to add the official repository to pull the pre-compiled `deb` and `rpm` packages from. + +{{< include "installation/add-nms-repo.md" >}} + +--- + +## Install Instance Manager + +{{}} + +{{%tab name="CentOS, RHEL, RPM-Based"%}} + +1. To install the latest version of Instance Manager, run the following command: + + ```bash + sudo yum install -y nms-instance-manager + ``` + + > **IMPORTANT!** The Instance Manager's administrator username (default is `admin`) and generated password are displayed in the terminal during installation. 
You should make a note of the password and store it securely. + +{{%/tab%}} + +{{%tab name="Debian, Ubuntu, Deb-Based"%}} + +1. To install the latest version of Instance Manager, run the following commands: + + ```bash + sudo apt-get update + sudo apt-get install -y nms-instance-manager + ``` + + > **IMPORTANT!** The Instance Manager's administrator username (default is `admin`) and generated password are displayed in the terminal during installation. You should make a note of the password and store it securely. + +{{%/tab%}} + +{{}} + +2. Enable and start the NGINX Instance Manager platform services: + + ```bash + sudo systemctl enable nms nms-core nms-dpm nms-ingestion nms-integrations --now + ``` + + NGINX Instance Manager components started this way run by default as the non-root `nms` user inside the `nms` group, both of which are created during installation. + +3. Restart the NGINX web server: + + ```bash + sudo systemctl restart nginx + ``` + +### Post-Installation Steps + +{{< include "installation/optional-installation-steps.md" >}} + +### Accessing the Web Interface + +{{< include "installation/access-web-ui.md" >}} + + +### Add License + +{{< include "nim/admin-guide/license/connected-install-license-note.md" >}} + +--- + +## Upgrade Instance Manager {#upgrade-nim} + +{{}} +{{%tab name="CentOS, RHEL, RPM-Based"%}} + +1. To upgrade to the latest version of the Instance Manager, run the following command: + + ```bash + sudo yum update -y nms-instance-manager + ``` + +{{%/tab%}} + +{{%tab name="Debian, Ubuntu, Deb-Based"%}} + +1. To upgrade to the latest version of the Instance Manager, run the following command: + + ```bash + sudo apt-get update + sudo apt-get install -y --only-upgrade nms-instance-manager + ``` + +{{%/tab%}} +{{}} + +2. 
Restart the NGINX Instance Manager platform services: + + ```bash + sudo systemctl restart nms + ``` + + NGINX Instance Manager components started this way run by default as the non-root `nms` user inside the `nms` group, both of which are created during installation. + +3. Restart the NGINX web server: + + ```bash + sudo systemctl restart nginx + ``` + +4. (Optional) If you use SELinux, follow the steps in the [Configure SELinux]({{< relref "nim/system-configuration/configure-selinux.md" >}}) guide to restore the default SELinux labels (`restorecon`) for the files and directories related to NGINX Management suite. diff --git a/content/nim/deploy/vm-bare-metal/install.md b/content/nim/deploy/vm-bare-metal/install.md new file mode 100644 index 000000000..5d8899adb --- /dev/null +++ b/content/nim/deploy/vm-bare-metal/install.md @@ -0,0 +1,302 @@ +--- +description: +docs: DOCS-1211 +doctypes: +- tutorial +tags: +- docs +title: Install on a virtual machine or bare metal using a script +toc: true +weight: 10 +--- + +{{< include "/nim/decoupling/note-legacy-nms-references.md" >}} + +## Overview + +This guide explains how to install F5 NGINX Instance Manager on a virtual machine or bare metal system using the `install-nim-bundle.sh` script. + +The script simplifies the installation by automating tasks such as verifying system requirements, configuring services, and managing environment-specific options. For more control or an alternative approach, you can refer to the [manual installation guide]({{< relref "nim/deploy/vm-bare-metal/install-nim-deprecated.md" >}}), which provides detailed, step-by-step instructions. + +--- + +## Before you begin + +Follow these steps to prepare for installing NGINX Instance Manager: + +- **Download the certificate and private key** (see the steps [below](#download-cert-key)): + Use the certificate and private key for NGINX Instance Manager (the same files used for NGINX Plus). + - Ensure the files have `.crt` and `.key` extensions. 
+ - Save them to the target system. The default locations are: + - `/etc/ssl/nginx/nginx-repo.crt` + - `/etc/ssl/nginx/nginx-repo.key` + +- **Check for previous deployments**: + Ensure that NGINX Instance Manager and its components are not already installed. + - If NGINX Instance Manager or its components (such as ClickHouse or NGINX) are detected, either follow the [upgrade instructions](#upgrade-nim) to update them or [manually remove the components](#uninstall-nim) before proceeding with the installation. + +- **Record the version details**: + Note the current version of NGINX Instance Manager and confirm the supported version of NGINX OSS or NGINX Plus you intend to use. + - By default, the script installs the latest version. + +- **(Optional) Install and configure Vault**: + If you plan to use Vault, set it up before proceeding. + +### Security considerations + +To ensure that your NGINX Instance Manager deployment remains secure, follow these recommendations: + +- Install NGINX Instance Manager on a dedicated machine (bare metal, container, cloud, or VM). +- Ensure that no other services are running on the same machine. + +--- + +## Requirements + +### Supported NGINX versions and Linux distributions + +
    + Supported NGINX versions + +{{< include "nim/tech-specs/supported-nginx-versions.md" >}} + +
    + +
    + Supported Linux distributions + +{{< include "nim/tech-specs/supported-distros.md" >}} + +
    + + + +--- + +## Download certificate and key {#download-cert-key} + +Download the certificate and private key required for NGINX Instance Manager. These files are necessary for adding the official repository during installation and can also be used when installing NGINX Plus. + +1. On the host where you're installing NGINX Instance Manager, create the **/etc/ssl/nginx/** directory: + + ```bash + sudo mkdir -p /etc/ssl/nginx + ``` + +2. Download the **SSL Certificate**, **Private Key** and ***JSON Web Token*** files from [MyF5](https://account.f5.com/myf5) or use the download link provided in your trial activation email. + +3. Move and rename the cert and key files to the correct directory: + + ```bash + sudo mv nginx-.crt /etc/ssl/nginx/nginx-repo.crt + sudo mv nginx-.key /etc/ssl/nginx/nginx-repo.key + ``` + +--- + +## Download and run the installation script {#download-install} + +### Prepare your system for installation + +Follow these steps to get your system ready for a successful installation with the `install-nim-bundle.sh` script: + +#### Resolve existing installations of NGINX Instance Manager + +The script supports only new installations. If NGINX Instance Manager is already installed, take one of the following actions: + +- **Upgrade manually** + The script cannot perform upgrades. To update an existing installation, follow the [upgrade steps](#upgrade-nim) in this document. + +- **Uninstall first** + Remove the current installation and its dependencies for a fresh start. Use the [uninstall steps](#uninstall-nim) to delete the primary components. Afterward, manually check for and remove leftover files such as repository configurations or custom settings to ensure a clean system. + +#### Verify SSL certificates and private keys + +Ensure that the required `.crt` and `.key` files are available, preferably in the default **/etc/ssl/nginx** directory. Missing certificates or keys will prevent the script from completing the installation. 
+ +#### Use the manual installation steps if needed + +If the script fails or if you prefer more control over the process, consider using the [manual installation steps]({{< relref "nim/deploy/vm-bare-metal/install-nim-deprecated.md" >}}). These steps provide a reliable alternative for troubleshooting or handling complex setups. + +### Run the installation script + +The `install-nim-bundle.sh` script automates the installation of NGINX Instance Manager. By default, the script: + +- Assumes no prior installation of NGINX Instance Manager or its dependencies and performs a fresh installation. +- Reads SSL files from the `/etc/ssl/nginx` directory. +- Installs the latest version of NGINX Open Source (OSS). +- Installs the ClickHouse database. +- Installs NGINX Instance Manager. +- Requires an active internet connection. + +{{< warning >}} + +As noted in [About subscription licenses]({{< relref "solutions/about-subscription-licenses.md#apply-the-jwt" >}}), **custom paths won't work until you upgrade to NGINX Plus R33**. + +{{< /warning >}} + +Download the `install-nim-bundle.sh` script: + +{{}} {{}} + +When you run the script, it downloads and installs NGINX Instance Manager. + +If you want to use the script with non-default options, use these switches: + +- To point to a repository key stored in a directory other than **/etc/ssl/nginx**: `-k /path/to/your/` file +- To point to a repository certificate stored in a directory other than **/etc/ssl/nginx**: `-c /path/to/your/` file +- To install NGINX Plus (instead of NGINX OSS): `-p -j /path/to/license.jwt` + +{{< note >}} Starting from [NGINX Plus Release 33]({{< ref "nginx/releases.md#r33" >}}), a JWT file is required for each NGINX Plus instance. For more information, see [About Subscription Licenses]({{< ref "/solutions/about-subscription-licenses.md">}}). {{< /note >}} + +You also need to specify the current operating system. 
To get the latest list supported by the script, run the following command: + +```bash +grep '\-d distribution' install-nim-bundle.sh +``` + +For example, to use the script to install NGINX Instance Manager on Ubuntu 24.04, with repository keys in the default `/etc/ssl/nginx` directory, with the latest version of NGINX OSS, run the following command: + +```bash +sudo bash install-nim-bundle.sh -n latest -d ubuntu24.04 -j /path/to/license.jwt +``` + +To install NGINX Instance Manager on Ubuntu 24.04 with the latest version of NGINX Plus by pointing to the location of your NGINX cert and key, run the following command: + +```bash +sudo bash install-nim-bundle.sh -c /path/to/nginx-repo.crt -k /path/to/nginx-repo.key -p latest -d ubuntu24.04 -j /path/to/license.jwt +``` + +In most cases, the script completes the installation of NGINX Instance Manager and associated packages. After installation is complete, the script takes a few minutes to generate a password. At the end of the process, you'll see an autogenerated password: + +```bash +Regenerated Admin password: +``` + +Save that password. You'll need it when you sign in to NGINX Instance Manager. + +### Problems and additional script parameters + +There are multiple parameters to configure in the Installation script. If you see fatal errors when running the script, first run the following command, which includes command options that can help you bypass problems: + +```bash +bash install-nim-bundle.sh -h +``` + +### Access the web interface {#access-web-interface} + +After installation, you can access the NGINX Instance Manager web interface to begin managing your deployment. + +1. Open a web browser. +2. Navigate to `https://`, replacing `` with the fully qualified domain name of your NGINX Instance Manager host. +3. Log in using the default administrator username (`admin`) and the autogenerated password displayed during installation. + +Save the autogenerated password displayed at the end of the installation process. 
If you want to change the admin password, refer to the [Set user passwords]({{< relref "/nim/admin-guide/authentication/basic-auth/set-up-basic-authentication.md#set-basic-passwords" >}}) section in the Basic Authentication topic. + +--- + +## Post-installation steps + +### Configure ClickHouse {#configure-clickhouse} + + +{{}}NGINX Instance Manager relies on [ClickHouse](https://clickhouse.com) **24.9.2.42** or later to store essential data, including metrics, events, alerts, and configuration settings.{{}} + + +{{}}The NGINX Instance Manager installation script also installs ClickHouse with a blank password. Update the **/etc/nms/nms.conf** file with it after installing NGINX Instance Manager. Otherwise, NGINX Instance Manager won't start. For more information on customizing ClickHouse settings, refer to the [Configure ClickHouse]({{< relref "/nim/system-configuration/configure-clickhouse.md" >}}) topic. {{}} + +#### ClickHouse default settings + +NGINX Instance Manager uses the following default values for ClickHouse: + +{{}}You can customize these settings. However, if you use custom settings, make sure to follow the [Configure ClickHouse]({{< relref "/nim/system-configuration/configure-clickhouse.md" >}}) instructions to update the **nms.conf** file after you've installed NGINX Instance Manager. Otherwise, NGINX Instance Manager won't be able to connect to ClickHouse.{{}} + +{{< include "installation/clickhouse-defaults.md" >}} + +### (Optional) Install and configure Vault {#install-vault} + +NGINX Instance Manager can use [Vault](https://www.vaultproject.io/) as a datastore for secrets. + +To install and enable Vault, follow these steps: + +- Follow Vault's instructions to [install Vault 1.8.8 or later](https://www.vaultproject.io/docs/install) for your distribution. +- Ensure you're running Vault in a [production-hardened environment](https://learn.hashicorp.com/tutorials/vault/production-hardening). 
+- After installing NGINX Instance Manager, follow the steps to [configure Vault for storing secrets]({{< relref "/nim/system-configuration/configure-vault.md" >}}). + +### (Optional) Configure SELinux + +SELinux helps secure your deployment by enforcing mandatory access control policies. + +If you use SELinux, follow the steps in the [Configure SELinux]({{< relref "/nim/system-configuration/configure-selinux.md" >}}) guide to restore SELinux contexts (`restorecon`) for the files and directories related to NGINX Instance Manager. + +### License NGINX Instance Manager + +{{< include "nim/admin-guide/license/connected-install-license-note.md" >}} + +--- + +## Upgrade NGINX Instance Manager {#upgrade-nim} + +{{}} +{{%tab name="CentOS, RHEL, RPM-Based"%}} + +1. To upgrade to the latest version of the NGINX Instance Manager, run the following command: + + ```bash + sudo yum update -y nms-instance-manager + ``` + +1. To upgrade to the latest version of Clickhouse, run the following command: + + ```bash + sudo yum update -y clickhouse-server clickhouse-client + ``` + +{{%/tab%}} + +{{%tab name="Debian, Ubuntu, Deb-Based"%}} + +1. To upgrade to the latest version of the NGINX Instance Manager, run the following commands: + + ```bash + sudo apt-get update + sudo apt-get install -y --only-upgrade nms-instance-manager + ``` + +1. To upgrade to the latest version of Clickhouse, run the following commands: + + ```bash + sudo apt-get update + sudo apt-get install -y --only-upgrade clickhouse-server clickhouse-client + ``` + +{{%/tab%}} +{{}} + +2. Restart the NGINX Instance Manager platform services: + + ```bash + sudo systemctl restart nms + ``` + + NGINX Instance Manager components started this way run by default as the non-root `nms` user inside the `nms` group, both of which are created during installation. + +3. Restart the NGINX web server: + + ```bash + sudo systemctl restart nginx + ``` + +4. 
Restart the Clickhouse server: + + ```bash + sudo systemctl restart clickhouse-server + ``` + +5. (Optional) If you use SELinux, follow the steps in the [Configure SELinux]({{< relref "nim/system-configuration/configure-selinux.md" >}}) guide to restore the default SELinux labels (`restorecon`) for the files and directories related to NGINX Instance Manager. + +--- + +## Uninstall NGINX Instance Manager {#uninstall-nim} + +{{< include "nim/uninstall/uninstall-nim.md" >}} diff --git a/content/nim/disconnected/_index.md b/content/nim/disconnected/_index.md new file mode 100644 index 000000000..c0f6f954b --- /dev/null +++ b/content/nim/disconnected/_index.md @@ -0,0 +1,7 @@ +--- +title: Disconnected environments +weight: 30 +url: /nginx-instance-manager/disconnected/ +cascade: + type: "nim-r33" +--- \ No newline at end of file diff --git a/content/nim/disconnected/add-license-disconnected-deployment.md b/content/nim/disconnected/add-license-disconnected-deployment.md new file mode 100644 index 000000000..ecd55ac3e --- /dev/null +++ b/content/nim/disconnected/add-license-disconnected-deployment.md @@ -0,0 +1,253 @@ +--- +title: "Add a license in a disconnected environment" +date: 2024-10-14T14:34:24-07:00 +# Change draft status to false to publish doc. +draft: false +# Description +# Add a short description (150 chars) for the doc. Include keywords for SEO. +# The description text appears in search results and at the top of the doc. +description: "" +# Assign weights in increments of 100 +weight: 200 +toc: true +tags: [ "docs" ] +# Create a new entry in the Jira DOCS Catalog and add the ticket ID (DOCS-) below +docs: "DOCS-1657" +# Taxonomies +# These are pre-populated with all available terms for your convenience. +# Remove all terms that do not apply. 
+categories: ["installation", "platform management", "load balancing", "api management", "service mesh", "security", "analytics"] +doctypes: ["task"] +journeys: ["researching", "getting started", "using", "renewing", "self service"] +personas: ["devops", "netops", "secops", "support"] +versions: [] +authors: [] + +--- + +## Overview + +This guide shows you how to add a license to NGINX Instance Manager in a disconnected (offline) environment. In this setup, systems don’t have internet access. You’ll download and apply your subscription’s JSON Web Token (JWT) license, then verify your entitlements with F5. + +{{< call-out "tip" "Using the REST API" "" >}}{{< include "nim/how-to-access-nim-api.md" >}}{{}} + + +## Before you begin + +### Set the operation mode to disconnected + +To configure NGINX Instance Manager for a network-restricted environment, you need to set the `mode_of_operation` to `disconnected` in the configuration file. + +{{< include "nim/disconnected/set-mode-of-operation-disconnected.md" >}} + +### Download the JWT license from MyF5 {#download-license} + +{{< include "licensing-and-reporting/download-jwt-from-myf5.md" >}} + + +
    + +## Add license and submit initial usage report {#add-license-submit-initial-usage-report} + + +{{}} + +{{%tab name="Bash script (recommended)"%}} + +### Add license and submit initial usage report with a bash script + +To add a license and submit the initial usage report in a disconnected environment, use the provided `license_usage_offline.sh` script. Run this script on a system that can access NGINX Instance Manager and connect to `https://product.apis.f5.com/` on port `443`. Replace each placeholder with your specific values. + +**Important**: The script to add a license won't work if a license has already been added. + +
    + +1. {{}}[Download license_usage_offline.sh](/scripts/license_usage_offline.sh). +1. Run the following command to allow the script to run: + + ```bash + chmod +x /license_usage_offline.sh + ``` + +1. Run the script. Replace each placeholder with your specific values: + + ``` bash + ./license_usage_offline.sh \ + -j .jwt \ + -i \ + -u admin \ + -p \ + -o report.zip \ + -s initial + ``` + + This command adds the license, downloads the initial usage report (`report.zip`), submits the report to F5 for acknowledgment, and uploads the acknowledgment back to NGINX Instance Manager. + +{{< include "nim/disconnected/license-usage-offline-script.md" >}} + +{{%/tab%}} + +{{%tab name="REST"%}} + +### Add license and submit initial usage report with curl + +To license NGINX Instance Manager, complete each of the following steps in order. + +**Important**: The `curl` command to add a license won't work if a license has already been added. + +Run these `curl` commands on a system that can access NGINX Instance Manager and connect to `https://product.apis.f5.com/` on port `443`. Replace each placeholder with your specific values. + +{{}}The `-k` flag skips SSL certificate validation. Use this only if your NGINX Instance Manager is using a self-signed certificate or if the certificate is not trusted by your system.{{}} + +1. **Add the license to NGINX Instance Manager**: + + ``` bash + curl -k --location 'https:///api/platform/v1/license?telemetry=true' \ + --header 'Origin: https://' \ + --header 'Referer: https:///ui/settings/license' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Basic ' \ + --data '{ + "metadata": { + "name": "license" + }, + "desiredState": { + "content": "" + } + }' + ``` + +1. **Poll the license status on NGINX Instance Manager**: + + Use this command to check the current license status. Look for `INITIALIZE_ACTIVATION_COMPLETE` or `CONFIG_REPORT_READY` in the status field. Poll periodically if necessary. 
+ + ```bash + curl -k "https:///api/platform/v1/license" \ + --header "accept: application/json" \ + --header "authorization: Basic " \ + --header "referer: https:///ui/settings/license" + ``` + +1. **Update the license configuration on NGINX Instance Manager**: + + This step ensures that the license configuration is fully applied. + + ```bash + curl -k --location --request PUT "https:///api/platform/v1/license?telemetry=true" \ + --header "Origin: https://" \ + --header "Referer: https:///ui/settings/license" \ + --header "Content-Type: application/json" \ + --header "Authorization: Basic " \ + --data '{ + "desiredState": { + "content": "", + "type": "JWT", + "features": [ + {"limit": 0, "name": "NGINX_NAP_DOS", "valueType": ""}, + {"limit": 0, "name": "IM_INSTANCES", "valueType": ""}, + {"limit": 0, "name": "TM_INSTANCES", "valueType": ""}, + {"limit": 0, "name": "DATA_PER_HOUR_GB", "valueType": ""}, + {"limit": 0, "name": "NGINX_INSTANCES", "valueType": ""}, + {"limit": 0, "name": "NGINX_NAP", "valueType": ""}, + {"limit": 0, "name": "SUCCESSFUL_API_CALLS_MILLIONS", "valueType": ""}, + {"limit": 0, "name": "IC_PODS", "valueType": ""}, + {"limit": 0, "name": "IC_K8S_NODES", "valueType": ""} + ] + }, + "metadata": { + "name": "license" + } + }' + ``` + +1. **Download the initial usage report**: + + ```bash + curl -k --location 'https:///api/platform/v1/report/download?format=zip&reportType=initial' \ + --header 'accept: */*' \ + --header 'Authorization: Basic ' \ + --output report.zip + ``` + +1. **Submit the usage report to F5 for verification**: + + ```bash + curl --location 'https://product.apis.f5.com/ee/v1/entitlements/telemetry/bulk' \ + --header "Authorization: Bearer $(cat /path/to/jwt-file)" \ + --form 'file=@".zip"' + ``` + + After running this command, look for the "statusLink" in the response. The `report-id` is the last part of the "statusLink" value (the UUID). 
For example: + + ```json + {"statusLink":"/status/2214e480-3401-43a3-a54c-9dc501a01f83"} + ``` + + In this example, the `report-id` is `2214e480-3401-43a3-a54c-9dc501a01f83`. + + You’ll need to use your specific `report-id` in the following steps. + +2. **Check the status of the usage acknowledgment**: + + Replace `` with your specific ID from the previous response. + + ``` bash + curl --location 'https://product.apis.f5.com/ee/v1/entitlements/telemetry/bulk/status/' \ + --header "Authorization: Bearer $(cat /path/to/jwt-file)" + ``` + +3. **Download the usage acknowledgement from F5**: + + ``` bash + curl --location 'https://product.apis.f5.com/ee/v1/entitlements/telemetry/bulk/download/' \ + --header "Authorization: Bearer $(cat /path/to/jwt-file)" \ + --output .zip + ``` + +4. **Upload the usage acknowledgement to NGINX Instance Manager**: + + ``` bash + curl -k --location 'https:///api/platform/v1/report/upload' \ + --header 'Authorization: Basic ' \ + --form 'file=@".zip"' + ``` + +{{%/tab%}} + +{{%tab name="Web interface"%}} + +### Add license and submit initial usage report with the web interface + +#### Add license + +To add a license: + +{{< include "nim/admin-guide/license/add-license-webui.md" >}} + +#### Download initial usage report + +Download the initial usage report to send to F5: + +- On the **License > Overview** page, select **Download License Report**. + + +#### Submit usage report to F5 + +You need to submit the usage report to F5 and download the acknowledgment over REST. To do so, follow steps 5–7 in the [**REST**](#add-license-submit-initial-usage-report) tab in this section. + +#### Upload the usage acknowledgement to NGINX Instance Manager + +To upload the usage acknowledgement: + +1. On the **License > Overview** page, select **Upload Usage Acknowledgement**. +2. Upload the acknowledgement by selecting **Browse** or dragging the file into the form. +3. Select **Add**. + +{{%/tab%}} + + +{{
    }} + + + + diff --git a/content/nim/disconnected/offline-install-guide-deprecated.md b/content/nim/disconnected/offline-install-guide-deprecated.md new file mode 100644 index 000000000..84af7aa3a --- /dev/null +++ b/content/nim/disconnected/offline-install-guide-deprecated.md @@ -0,0 +1,194 @@ +--- +title: Manually install in a disconnected environment (deprecated) +weight: 100 +toc: true +noindex: true +type: how-to +product: NIM +docs: DOCS-000 +--- + +{{}} +This document outlines manual steps that have been replaced by a simplified script-based process. For most users, we recommend using the updated process documented [here]({{< relref "nim/disconnected/offline-install-guide.md" >}}).{{}} + +## Overview + +This guide explains how to install and upgrade NGINX Instance Manager in environments without Internet access. It covers key steps, including downloading packages, managing dependencies, and configuring the system for offline use. You’ll also learn how to set up NGINX Instance Manager in disconnected mode and manually update the CVE list to keep your system secure. + +## Before you begin + +{{}} +You must complete the following prerequisite steps **before** installing NGINX Instance Manager. **Skipping these steps could cause installation issues**. +{{}} + +### Security considerations + +To ensure that your NGINX Instance Manager deployment remains secure, follow these recommendations: + +- Install NGINX Instance Manager on a dedicated machine (bare metal, container, cloud, or VM). +- Make sure no other services are running on the same machine. +- Ensure the machine is not accessible from the Internet. +- Place the machine behind a firewall. + +### Download package files + +To complete the steps in this guide, you need to download the NGINX Instance Manager package files from the [MyF5 Customer Portal](https://account.f5.com/myf5). 
+ +### Install local dependencies + +Local dependencies are common Linux packages like `curl` or `openssl`, which most Linux distributions include by default. When installing NGINX Instance Manager, your package manager will automatically install these dependencies. Without internet access, ensure your package manager can use a local package repository, such as a distribution DVD/ISO image or internal network mirror. Check your Linux distribution's documentation for details. + +{{< call-out "note" "RedHat on AWS" "fa-brands fa-aws" >}}If you're using AWS and can't attach remote or local RedHat package repositories, download the necessary packages on another RedHat machine and copy them to your target machine. Use the `yumdownloader` utility for this task: +. +{{}} + +### Download and install external dependencies + +External dependencies, such as ClickHouse and NGINX Plus, aren't included by default in standard Linux distributions. You need to manually download and transfer these to your offline system. + +To download external dependencies: + +1. Download the `fetch-external-dependencies.sh` script: + + {{}} {{}} + +2. Run the script to download the external dependencies for your specific Linux distribution: + + ```bash + sudo bash fetch-external-dependencies.sh + ``` + + Supported Linux distributions: + + - `ubuntu20.04` + - `ubuntu22.04` + - `debian11` + - `debian12` + - `oracle7` + - `oracle8` + - `rhel8` + - `rhel9` + - `amzn2` + + **For example**, to download external dependencies for Ubuntu 20.04: + + ```bash + sudo bash fetch-external-dependencies.sh ubuntu20.04 + ``` + + This will create an archive, such as `nms-dependencies-ubuntu20.04.tar.gz`, containing the required dependencies. + +3. Copy the archive to your target machine and extract the contents: + + {{< note >}}The bundled NGINX server package may conflict with existing versions of NGINX or NGINX Plus. 
Delete the package from the bundle if you want to keep your current version.{{}} + + - **For RHEL and RPM-Based systems**: + + ```bash + tar -kzxvf nms-dependencies-.tar.gz + sudo rpm -ivh *.rpm + ``` + + - **For Debian, Ubuntu, Deb-based systems**: + + ```bash + tar -kzxvf nms-dependencies-.tar.gz + sudo dpkg -i ./*.deb + ``` + + {{< call-out "important" "Setting a custom ClickHouse password" "fas fa-exclamation-triangle" >}} + + When installing ClickHouse, you can set a password or leave it blank (default is an empty string). If you set a password, make sure to update the **/etc/nms/nms.conf** file with it after installing NGINX Instance Manager. Otherwise, NGINX Instance Manager won't start. For more information on customizing ClickHouse settings, refer to the [Configure ClickHouse]({{< relref "/nim/system-configuration/configure-clickhouse.md" >}}) topic. + + {{}} + + +--- + +## Install NGINX Instance Manager {#install-nim-offline} + +1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the NGINX Instance Manager package files. + +2. Install the NGINX Instance Manager package: + + - **For RHEL and RPM-based systems**: + + ```bash + sudo rpm -ivh --nosignature /home//nms-instance-manager_.x86_64.rpm + ``` + + - **For Debian, Ubuntu, Deb-based systems**: + + ```bash + sudo apt-get -y install -f /home//nms-instance-manager__amd64.deb + ``` + + {{< call-out "important" "Save the password!" "fas fa-exclamation-triangle" >}} + The administrator username (default: **admin**) and the generated password are displayed in the terminal during installation. Be sure to record the password and store it securely. + {{}} + +3. Enable and start NGINX Instance Manager services: + + ```bash + sudo systemctl enable nms nms-core nms-dpm nms-ingestion nms-integrations --now + ``` + + {{< include "installation/nms-user.md" >}} + +4. 
Restart the NGINX web server: + + ```bash + sudo systemctl restart nginx + ``` + +--- + +## Set the operation mode to disconnected {#set-mode-disconnected} + +{{< include "nim/disconnected/set-mode-of-operation-disconnected.md" >}} + +--- + +## Post-installation steps (optional) + +{{< include "installation/optional-installation-steps.md" >}} + +## Upgrade NGINX Instance Manager {#upgrade-nim-offline} + +To upgrade NGINX Instance Manager to a newer version: + +1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the latest package files. +2. Upgrade the package: + - **For RHEL and RPM-based systems**: + + ``` bash + sudo rpm -Uvh --nosignature /home/user/nms-instance-manager_.x86_64.rpm + sudo systemctl restart nms + sudo systemctl restart nginx + ``` + + - **For Debian, Ubuntu, Deb-based systems**: + + ```bash + sudo apt-get -y install -f /home/user/nms-instance-manager__amd64.deb + sudo systemctl restart nms + sudo systemctl restart nginx + ``` + + {{< include "installation/nms-user.md" >}} + +3. (Optional) If you use SELinux, follow the [Configure SELinux]({{< relref "/nim/system-configuration/configure-selinux.md" >}}) guide to restore SELinux contexts using restorecon for files and directories related to NGINX Instance Manager. 
+ +--- + +## CVE checking {#cve-check} + +To manually update the CVE list in an air-gapped environment, follow these steps to download and overwrite the `cve.xml` file in the `/usr/share/nms` directory and restart the Data Plane Manager service: + +```bash +sudo chmod 777 /usr/share/nms/cve.xml && \ +sudo curl -s http://hg.nginx.org/nginx.org/raw-file/tip/xml/en/security_advisories.xml > /usr/share/nms/cve.xml && \ +sudo chmod 644 /usr/share/nms/cve.xml && \ +sudo systemctl restart nms-dpm +``` + diff --git a/content/nim/disconnected/offline-install-guide.md b/content/nim/disconnected/offline-install-guide.md new file mode 100644 index 000000000..aca4beeda --- /dev/null +++ b/content/nim/disconnected/offline-install-guide.md @@ -0,0 +1,191 @@ +--- +title: Install in a disconnected environment using a script +toc: true +weight: 100 +type: how-to +product: NIM +docs: DOCS-803 +--- + +{{< include "/nim/decoupling/note-legacy-nms-references.md" >}} + +## Overview + +This guide shows you how to install and upgrade NGINX Instance Manager in environments without internet access. It covers key steps, including downloading packages, managing dependencies, and configuring the system for offline use. You’ll also learn how to set up NGINX Instance Manager in disconnected mode and update the CVE list manually to keep your system secure. + +{{}}If you prefer to follow the original manual steps, you can access the [deprecated guide]({{< relref "nim/disconnected/offline-install-guide-deprecated.md" >}}). Please note that this guide is no longer actively maintained and may not reflect the latest updates or best practices.{{}} + +--- + +## Before you begin + +You’ll need internet access for the steps in this section. 
+ +### Prepare your system for installation + +Follow these steps to get your system ready for a successful installation with the `install-nim-bundle.sh` script: + +#### Resolve existing installations of NGINX Instance Manager + +The script supports only new installations. If NGINX Instance Manager is already installed, take one of the following actions: + +- **Upgrade manually** + The script cannot perform upgrades. To update an existing installation, follow the [upgrade steps](#upgrade-nim) in this document. + +- **Uninstall first** + Remove the current installation and its dependencies for a fresh start. Use the [uninstall steps](#uninstall-nim) to delete the primary components. Afterward, manually check for and remove leftover files such as repository configurations or custom settings to ensure a clean system. + +#### Verify SSL certificates and private keys + +Ensure that the required `.crt` and `.key` files are available, preferably in the default **/etc/ssl/nginx** directory. Missing certificates or keys will prevent the script from completing the installation. + +#### Use the manual installation steps if needed + +If the script fails or if you prefer more control over the process, consider using the [manual installation steps]({{< relref "nim/disconnected/offline-install-guide-deprecated.md" >}}). These steps provide a reliable alternative for troubleshooting or handling complex setups. + +### Download the SSL Certificate and Private Key from MyF5 + +Download the SSL certificate and private key required for NGINX Instance Manager: + +1. Log in to [MyF5](https://my.f5.com/manage/s/). +1. Go to **My Products & Plans > Subscriptions** to see your active subscriptions. +1. Find your NGINX products or services subscription, and select the **Subscription ID** for details. +1. Download the **SSL Certificate** and **Private Key** files. 
+ +### Download the installation script + +{{}} {{}} + +To run the script, enter the following command, replacing `` and `` with the full paths and filenames of your SSL certificate and private key files: + +```shell +sudo bash install-nim-bundle.sh \ + -c \ + -k \ + -m offline \ + -d \ + -p \ + -v \ + -j +``` + +
    + +By default, this command installs the latest version of NGINX Open Source. To install NGINX Plus or specify a different version of NGINX Open Source, use the `-p` or `-n` options as needed. + +{{< note >}} Starting from [NGINX Plus Release 33]({{< ref "nginx/releases.md#r33" >}}), a JWT file is required for each NGINX Plus instance. For more information, see [About Subscription Licenses]({{< ref "/solutions/about-subscription-licenses.md">}}). {{< /note >}} + +
    + +**Explanation of options:** + +- **`-c`**: Uses the specified SSL certificate file. Copies the file to the /etc/ssl/nginx directory. +- **`-k`**: Uses the specified private key file. Copies the file to the /etc/ssl/nginx directory. +- **`-m`**: Sets the installation mode (use `offline` for disconnected environments). +- **`-d`**: Defines the target distribution (replace `` with one of the supported options below). +- **`-n`**: Installs a specific version of NGINX Open Source. Use `latest` to install the most recent version or specify a version like `1.27.1`. If neither `-n` nor `-p` is specified, the script defaults to installing the latest version of NGINX Open Source. +- **`-p`**: Installs the specified version of NGINX Plus. Use `latest` for the newest version or a specific release like `R32`. Overrides the `-n` option if both are specified. +- **`-v`**: Installs the specified version of NGINX Instance Manager. Use `latest` for the newest version or a specific release like `2.18.0`. If you skip this option, the script assumes you want to install `latest`. +- **`-j`**: Uses the specified JWT token. + + +**Supported distributions:** + +To get the latest list supported by the script, run the following command: + +```bash +grep '\-d distribution' install-nim-bundle.sh +``` + +The script downloads the required packages and adds them to a tarball file. You’ll need to copy this tarball to the target machine in the disconnected environment. + +--- + +## Install NGINX Instance Manager + +1. Copy the following files to the target system: + - `install-nim-bundle.sh` script + - SSL certificate file + - Private key file + - Tarball file with the required packages + +2. Run the installation script: + + ```shell + sudo bash install-nim-bundle.sh \ + -c \ + -k \ + -m offline \ + -d \ + -i + ``` + +3. **Save the admin password**. In most cases, the script completes the installation of NGINX Instance Manager and associated packages.
After installation is complete, the script takes a few minutes to generate a password. At the end of the process, you'll see an autogenerated password: + + ```shell + Regenerated Admin password: + ``` + + Save that password. You'll need it when you sign in to NGINX Instance Manager. + +3. After installation, open a web browser, go to `https://` (the fully qualified domain name of the NGINX Instance Manager host), and log in. + +--- + +## Set the operation mode to disconnected {#set-mode-disconnected} + +{{< include "nim/disconnected/set-mode-of-operation-disconnected.md" >}} + +--- + +## Post-installation steps (optional) + +{{< include "installation/optional-installation-steps.md" >}} + +--- + +## Upgrade NGINX Instance Manager {#upgrade-nim} + +To upgrade NGINX Instance Manager to a newer version: + +1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the latest package files. +2. Upgrade the package: + - **For RHEL and RPM-based systems**: + + ```shell + sudo rpm -Uvh --nosignature /home/user/nms-instance-manager_.x86_64.rpm + sudo systemctl restart nms + sudo systemctl restart nginx + ``` + + - **For Debian, Ubuntu, Deb-based systems**: + + ```shell + sudo apt-get -y install -f /home/user/nms-instance-manager__amd64.deb + sudo systemctl restart nms + sudo systemctl restart nginx + ``` + + {{< include "installation/nms-user.md" >}} + +3. (Optional) If you use SELinux, follow the [Configure SELinux]({{< relref "/nim/system-configuration/configure-selinux.md" >}}) guide to restore SELinux contexts using restorecon for files and directories related to NGINX Instance Manager. 
+ +--- + +## Uninstall NGINX Instance Manager {#uninstall-nim} + +{{< include "nim/uninstall/uninstall-nim.md" >}} + +--- + +## CVE checking {#cve-check} + +To manually update the CVE list in an air-gapped environment, follow these steps to download and overwrite the `cve.xml` file in the `/usr/share/nms` directory and restart the Data Plane Manager service: + +```shell +sudo chmod 777 /usr/share/nms/cve.xml && \ +sudo curl -s http://hg.nginx.org/nginx.org/raw-file/tip/xml/en/security_advisories.xml > /usr/share/nms/cve.xml && \ +sudo chmod 644 /usr/share/nms/cve.xml && \ +sudo systemctl restart nms-dpm +``` + diff --git a/content/nim/disconnected/report-usage-disconnected-deployment.md b/content/nim/disconnected/report-usage-disconnected-deployment.md new file mode 100644 index 000000000..fa5a5d851 --- /dev/null +++ b/content/nim/disconnected/report-usage-disconnected-deployment.md @@ -0,0 +1,187 @@ +--- +title: "Report usage to F5 in a disconnected environment" +date: 2024-10-14T14:29:40-07:00 +# Change draft status to false to publish doc. +draft: false +# Assign weights in increments of 100 +weight: 300 +toc: true +tags: [ "docs" ] +# Create a new entry in the Jira DOCS Catalog and add the ticket ID (DOCS-) below +docs: "DOCS-1658" +# Taxonomies +# These are pre-populated with all available terms for your convenience. +# Remove all terms that do not apply. +categories: ["installation", "platform management", "load balancing", "api management", "service mesh", "security", "analytics"] +doctypes: ["task"] +journeys: ["researching", "getting started", "using", "renewing", "self service"] +personas: ["devops", "netops", "secops", "support"] +versions: [] +authors: [] + +--- + +## Overview + +In a disconnected environment without internet access, NGINX Plus sends usage data to NGINX Instance Manager. You’ll need to download the usage report from NGINX Instance Manager and submit it to F5 from a location with internet access. 
After F5 verifies the report, you can download the acknowledgement, which you must upload back to NGINX Instance Manager. + +--- + +## Before you begin + +Before submitting usage data to F5, first configure NGINX Plus to report telemetry data to NGINX Instance Manager. + +### Configure NGINX Plus to report usage to NGINX Instance Manager + +To configure NGINX Plus (R33 and later) to report usage data to NGINX Instance Manager: + +{{< include "licensing-and-reporting/configure-nginx-plus-report-to-nim.md" >}} + +--- + +## Submit usage report to F5 {#submit-usage-report} + +{{< call-out "tip" "Using the REST API" "" >}}{{< include "nim/how-to-access-nim-api.md" >}}{{}} + +
    + +{{}} + +{{%tab name="Bash script (recommended)"%}} + +### Submit usage report with a bash script + +To submit a usage report in a disconnected environment, use the provided `license_usage_offline.sh` script. Run this script on a system that can access NGINX Instance Manager and connect to `https://product.apis.f5.com/` on port `443`. Replace each placeholder with your specific values. + +
    + +1. {{}}[Download license_usage_offline.sh](/scripts/license_usage_offline.sh). +1. Run the following command to allow the script to run: + + ```bash + chmod +x /license_usage_offline.sh + ``` + +1. Run the script. Replace each placeholder with your specific values: + + ``` bash + ./license_usage_offline.sh \ + -j .jwt \ + -i \ + -u admin \ + -p \ + -o report.zip \ + -s telemetry + ``` + + This command downloads the usage report (`report.zip`), submits the report to F5 for acknowledgment, and uploads the acknowledgment back to NGINX Instance Manager. + +{{< include "nim/disconnected/license-usage-offline-script.md" >}} + +{{%/tab%}} + +{{%tab name="REST"%}} + +### Submit usage report with curl + +To submit a usage report using `curl`, complete each of the following steps in order. + +Run these `curl` commands on a system that can access NGINX Instance Manager and connect to `https://product.apis.f5.com/` on port `443`. Replace each placeholder with your specific values. + +{{}}The `-k` flag skips SSL certificate validation. Use this only if your NGINX Instance Manager is using a self-signed certificate or if the certificate is not trusted by your system.{{}} + +1. **Prepare the usage report**: + + ```bash + curl -k --location 'https:///api/platform/v1/report/download?format=zip&reportType=telemetry&telemetryAction=prepare' \ + --header 'accept: application/json' \ + --header 'authorization: Basic ' \ + --header 'referer: https:///ui/settings/license' + ``` + +1. **Download the usage report from NGINX Instance Manager**: + + ```bash + curl -k --location 'https:///api/platform/v1/report/download?format=zip&reportType=telemetry&telemetryAction=download' \ + --header 'accept: */*' \ + --header 'authorization: Basic ' \ + --output report.zip + ``` + +1. 
**Submit the usage report to F5 for verification**: + + ```bash + curl --location 'https://product.apis.f5.com/ee/v1/entitlements/telemetry/bulk' \ + --header "Authorization: Bearer $(cat /path/to/jwt-file)" \ + --form 'file=@".zip"' + ``` + + After running this command, look for the "statusLink" in the response. The `report-id` is the last part of the "statusLink" value (the UUID). For example: + + ```json + {"statusLink":"/status/2214e480-3401-43a3-a54c-9dc501a01f83"} + ``` + + In this example, the `report-id` is `2214e480-3401-43a3-a54c-9dc501a01f83`. + + You’ll need to use your specific `report-id` in the following steps. + +1. **Check the status of the usage acknowledgement**: + + Replace `` with your specific ID from the previous response. + + ```bash + curl --location 'https://product.apis.f5.com/ee/v1/entitlements/telemetry/bulk/status/' \ + --header "Authorization: Bearer $(cat /path/to/jwt-file)" + ``` + +1. **Download the usage acknowledgement from F5**: + + ```bash + curl --location 'https://product.apis.f5.com/ee/v1/entitlements/telemetry/bulk/download/' \ + --header "Authorization: Bearer $(cat /path/to/jwt-file)" \ + --output .zip + ``` + +1. **Upload the usage acknowledgement to NGINX Instance Manager**: + + ```bash + curl -k --location 'https:///api/platform/v1/report/upload' \ + --header 'Authorization: Basic ' \ + --form 'file=@".zip"' + ``` + +{{%/tab%}} + +{{%tab name="Web interface"%}} + +### Submit usage report with the web interface + +#### Download usage report + +Download the usage report to send to F5: + +- On the **License > Overview** page, select **Download License Report**. + +#### Submit usage report to F5 + +You need to submit the usage report to F5 and download the acknowledgment over REST. To do so, follow steps 3–5 in the [**REST**](#add-license-submit-initial-usage-report) tab in this section. + +#### Upload the usage acknowledgement to NGINX Instance Manager + +To upload the usage acknowledgement: + +1. 
On the **License > Overview** page, select **Upload Usage Acknowledgement**. +2. Upload the acknowledgement by selecting **Browse** or dragging the file into the form. +3. Select **Add**. + +{{%/tab%}} + + +{{
    }} + +--- + +## What’s reported {#telemetry} + +{{< include "licensing-and-reporting/reported-usage-data.md" >}} diff --git a/content/nim/fundamentals/_index.md b/content/nim/fundamentals/_index.md new file mode 100644 index 000000000..aff5a42ca --- /dev/null +++ b/content/nim/fundamentals/_index.md @@ -0,0 +1,5 @@ +--- +title: Fundamentals +url: /nginx-instance-manager/fundamentals/ +weight: 1 +--- \ No newline at end of file diff --git a/content/nim/fundamentals/api-overview.md b/content/nim/fundamentals/api-overview.md new file mode 100644 index 000000000..5e3baedfc --- /dev/null +++ b/content/nim/fundamentals/api-overview.md @@ -0,0 +1,135 @@ +--- +description: +docs: DOCS-798 +doctypes: +- reference +tags: +- docs +title: "Overview: NGINX Instance Manager REST API" +toc: true +weight: 400 +--- + +{{< include "/nim/decoupling/note-legacy-nms-references.md" >}} + +## Introduction + +NGINX Instance Manager provides a [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) API that uses standard authentication methods, HTTP response codes, and verbs. You can use the API to manage both NGINX Instance Manager and the NGINX instances running on your systems. + +## Object model + +The NGINX Instance Manager REST API allows you to manage both NGINX Instance Manager and NGINX instances programmatically. The API provides the following features: + +- View metrics and information about data plane host systems and NGINX instances. +- View and edit NGINX configurations. +- Save NGINX configurations for future deployment. +- Analyze saved and current configurations for syntactic errors. +- Publish configurations to NGINX instances. +- Scan the network to find unmanaged NGINX instances. +- Manage certificates. +- Create users, roles, and role permissions to manage RBAC. 
+ +## API reference documentation + +{{< include "nim/how-to-access-api-docs.md" >}} + +## Usage + +You can use tools such as `curl` or [Postman](https://www.postman.com) to interact with the NGINX Instance Manager API. + +You can find the NGINX Instance Manager API URLs in the following format: `https:///api/platform/`. + +{{}}When making API calls by using `curl`, Postman, or any other tool, include your [authentication](#authentication) information with each call. Also include a `-k` to bypass TLS/SSL verification.{{}} + +Alternatively, in the API Reference docs, you can also use the "Try it Out" function. Since you're already logged into the NGINX Instance Manager platform, the "Try it Out" function automatically includes authentication credentials. + +To do so, take the steps below: + +1. Select the endpoint and action that you want to send. For example: `POST /infrastructure/workspaces`. +2. Select the **Try it Out** button. +3. If the endpoint accepts parameters, replace the placeholder examples in the request body with your desired values. +4. Select the **Execute** button to send the request. +5. When the request completes, the response appears in the user interface. + +## Authentication + +To use the NGINX Instance Manager API, you need to use one of the following authentication methods: + +- Basic authentication +- JSON Web Token (JWT) + +### Basic authentication + +{{< include "nim/admin-guide/auth/basic-auth-api-requests.md" >}} + +### JSON Web Token + +If your organization is using OIDC, you will be prompted to log in with your Identity Provider the first time you attempt to reach an API. After authenticating, you can request a JWT to use in subsequent API calls. 
+ +{{}}The means of requesting a token varies according to the Identity Provider; if you're not sure which provider your organization uses, check with your system administrator or technical support team.{{}} + +Once you have a JWT, set it up as a "Bearer" using the "Authorization" request header field, as shown in the example below. + +```shell +curl -X GET "https:///api/platform//systems" -H "Authorization: Bearer " +``` + +{{< include "security/jwt-password-note.md" >}} + +## Errors and response codes + +NGINX Instance Manager uses standard HTTP response codes to indicate whether an API request succeeds or fails. Codes in the `2xx` range mean the request succeeded. Codes in the `400` range mean the request failed due to the reason(s) indicated in the response message. Common reasons for `4xx` responses are: + +- Requests where required information is missing. +- Lack of or incorrect authentication credentials. +- Requests that refer to resources that do not exist or are in use by other resources. + +**HTTP Status Codes** +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Response Code | Meaning | +|---------------|------------------------------------------------------------------| +| 200 | Success: The request was received. | +| 201 | Success: Created the requested resource. | +| 202 | Success: The request was accepted and configuration is in process.| +| 204 | Success: Deleted the requested resource. | +| 400 | Bad Request: Required information is missing or incorrectly formatted. | +| 401 | Unauthorized: You are not logged in or do not have permission to access the requested resource. | +| 404 | Not found: The requested resource does not exist. | +| 409 | Conflict: The requested resource already exists or is referenced by another resource. | + +{{< /bootstrap-table >}} + +## Encoding + +The NGINX Instance Manager API expects and returns JSON-formatted data by default. + +All JSON data must be encoded using UTF-8. 
If you don't specify a media type in an API call, the API uses `application/json` by default. + +## Pagination + +Top-level NGINX Instance Manager API endpoints support fetching information about multiple resources ("lists"). These requests can return large data sets (for example, `GET /events` and `GET /instances`). You can control the size of the data set returned and navigate through pages by sending additional calls. + +### Parameters + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Name | Format | Type | Description | Default value | +|------------|----------|-------|------------------------------------------------------------------------------------------------------------------------------|---------------| +| page | integer | query | The page number to retrieve. | `1` | +| pageToken| string | query | A transactional token used for pagination. The token ensures consistent query results across requests. | N/A | +| pageSize | integer | query | The number of items returned per page. The maximum value is 100. If `pageSize=0`, pagination is disabled, and all results are returned. | `100` | + +{{< /bootstrap-table >}} + +## Versioning + +Each major version of the NGINX Instance Manager API is backward-compatible with previous releases of the same version. Any backward-incompatible changes result in a new major version. + +The version is represented in the `` part of the API URI. + +For example, to use a v2 API, send requests to: + +`https:///api/platform/v2` + +When a new version is required, we release it for the entire API. You won't see a mix of v1 and v2 objects in the same API. 
diff --git a/content/nim/fundamentals/dashboard-overview.md b/content/nim/fundamentals/dashboard-overview.md new file mode 100644 index 000000000..d42f27753 --- /dev/null +++ b/content/nim/fundamentals/dashboard-overview.md @@ -0,0 +1,79 @@ +--- +docs: DOCS-1387 +doctypes: +- reference +tags: +- docs +title: NGINX Instance Manager dashboard +toc: true +weight: 300 +--- + +The NGINX Instance Manager dashboard gives you a high-level view of the health and performance of your NGINX instances. It provides a central place to identify, diagnose, and resolve issues in your data plane. You can also track how deployments affect the performance of individual instances and identify trends over time. + +This document walks you through the different panels in the dashboard and shows how to use them to monitor your data plane. + +You’ll need a user with access to these RBAC features to use the dashboard: + +- Instance Management +- Certificates +- Analytics + +To learn more about Role-based Access Control and the available features, see the [Getting Started with RBAC]({{< relref "/nim/admin-guide/rbac/overview-rbac.md" >}}) documentation. + +## Prerequisites + +Follow these steps to stream and display all relevant metrics in the dashboard: + +1. Install NGINX Agent v2.30 or later on your NGINX data plane instances. See the [Upgrade NGINX Agent Package](https://docs.nginx.com/nginx-agent/installation-upgrade/upgrade/) for more information. +2. Ensure that NGINX Plus or NGINX Open Source Stub Status APIs are configured to send NGINX metrics using NGINX Agent. See the [Instance Metrics Overview]({{< relref "/nim/monitoring/overview-metrics.md" >}}) for more details. + +## Certificates + +The Certificates panel shows the number of certificates in your data plane, categorized by their status. Use this panel to identify certificates that will expire in the next 30 days or have already expired. 
+ +Select **See all certificates** to open the Certificates and Keys section, where you can view a list of all certificates in your data plane. + +Select any certificate from the list to view its details and resolve any issues. + +## CPU Utilization + +The CPU Utilization panel shows the instances in your data plane with the highest average CPU usage for the selected period, along with utilization trends. The list is sorted in descending order. Use this panel to identify instances with high CPU usage and track performance trends over time. + +Select **See more** to open a detailed view where you can see a list of all instances in your data plane along with their CPU usage averages and trends. + +Use the menu in the top-right corner to change the time range for CPU utilization data. Select any hostname in the list to view a full set of metrics specific to that instance. + +## Memory Utilization + +The Memory Utilization panel shows the instances in your data plane with the highest average memory usage for the selected period, along with their utilization trends. The list is sorted in descending order. Use this panel to identify instances with high memory usage and to track trends over time. + +Select **See more** to open a detailed view where you can see a list of all instances in your data plane along with their memory usage averages and trends. + +Use the menu in the top-right corner to change the time range for memory utilization data. Select any hostname in the list to view a full set of metrics specific to that instance. + +## Disk Space Utilization + +The Disk Space Utilization panel shows the instances in your data plane with the highest average disk usage on the root partition for the selected period, along with utilization trends. The list is sorted in descending order. Use this panel to identify instances with high disk usage and spot volumes that are nearly full. 
+ +Select **See more** to open a detailed view where you can see a list of all instances in your data plane and their disk usage averages. Select the chevron next to the hostname to see the disk usage for each partition. + +Use the menu in the top-right corner to change the time range for disk utilization data. Select any hostname in the list to view a full set of metrics specific to that instance. + +{{< note >}}The value in the **Current Value** column reflects the disk usage of the root partition on the instance. An alert appears next to the hostname if the available space on the root partition drops below 20%.{{< /note >}} + +## Network Utilization + +The Network Utilization panel shows the instances in your data plane with the highest average inbound network traffic rate, outbound network traffic rate, or number of incoming requests for the selected period. The list is sorted in descending order. Use this panel to detect traffic spikes, identify instances with abnormally high or low network traffic, and track trends over time. + +Select **See more** to open a detailed view where you can see a list of all instances in your data plane, along with their traffic averages, trends, and request counts. + +Use the menu in the top-right corner to change the time range for utilization data. Select any hostname in the list to view a full set of metrics specific to that instance. + +## HTTP Errors + +The HTTP Errors panel shows the instances in your data plane with the highest number of HTTP server error responses for the selected period. The list is sorted in descending order. Use this panel to identify instances with high HTTP error counts and take corrective actions. + +Select **See more** to open a detailed view where you can see a list of all instances in your data plane and their HTTP error counts. Select any hostname in the list to view a full set of metrics specific to that instance. 
+ +Use the menu at the top of the panel to select the HTTP status code(s) to display. Use the menu in the top-right corner to change the time range for error data. Select any hostname in the list to view a full set of metrics specific to that instance. \ No newline at end of file diff --git a/content/nim/fundamentals/tech-specs.md b/content/nim/fundamentals/tech-specs.md new file mode 100644 index 000000000..f695f5a1c --- /dev/null +++ b/content/nim/fundamentals/tech-specs.md @@ -0,0 +1,149 @@ +--- +docs: DOCS-805 +doctypes: reference +title: Technical Specifications +toc: true +weight: 20 +--- + +## Overview + +NGINX Instance Manager provides centralized management for NGINX Open Source and NGINX Plus instances across various environments, including bare metal, containers, public clouds (AWS, Azure, Google Cloud), and virtual machines. It supports several Linux distributions, including Amazon Linux, CentOS, Debian, RHEL, and Ubuntu. This guide outlines the technical specifications, minimum requirements, and supported platforms for deploying NGINX Instance Manager, ensuring optimal performance in both small and large environments. + +## Supported deployment environments {#supported-environments} + +You can deploy NGINX Instance Manager in the following environments: + +- **Bare metal** +- **Container** +- **Public cloud**: AWS, Google Cloud Platform, Microsoft Azure +- **Virtual machine** + +## Supported Linux Distributions {#supported-distributions} + +{{< include "nim/tech-specs/supported-distros.md" >}} + +## Supported NGINX Versions {#nginx-versions} + +{{< include "nim/tech-specs/supported-nginx-versions.md" >}} + +## Sizing recommendations {#system-sizing} + +The following recommendations provide the minimum guidelines for NGINX Instance Manager. These guidelines ensure adequate performance, but for optimal results, we strongly recommend using solid-state drives (SSDs) for storage. 
+ +### Standard NGINX configuration deployments + +This section outlines the recommendations for NGINX Instance Manager deployments with data plane instances using standard configurations. **Standard configurations** typically support up to **40 upstream servers** with associated location and server blocks, and up to **350 certificates**. This is ideal for medium-sized environments or applications with moderate traffic. + +We recommend using SSDs to enhance storage performance. + +{{}} +| Number of Data Plane Instances | CPU | Memory | Network | Storage | +|--------------------------------|--------|----------|-----------|---------| +| 10 | 2 vCPU | 4 GB RAM | 1 GbE NIC | 100 GB | +| 100 | 2 vCPU | 4 GB RAM | 1 GbE NIC | 1 TB | +| 1000 | 4 vCPU | 8 GB RAM | 1 GbE NIC | 3 TB | +{{}} + +These values represent the minimum resources needed for deployments that fall under standard configurations. + +### Large NGINX configuration deployments + +For environments requiring more resources, **large configurations** are suitable. These configurations can support up to **300 upstream servers** and are designed for enterprise environments or applications handling high traffic and complex configurations. + +{{}} +| Number of Data Plane Instances | CPU | Memory | Network | Storage | +|--------------------------------|--------|----------|-----------|---------| +| 50 | 4 vCPU | 8 GB RAM | 1 GbE NIC | 1 TB | +| 250 | 4 vCPU | 8 GB RAM | 1 GbE NIC | 2 TB | +{{}} + +### Sizing benchmarks for storage + +The following benchmarks focus on **disk storage** requirements for NGINX Instance Manager. Storage needs depend on the **number of instances** and **data retention periods** (in days). The benchmarks are divided into three configuration sizes: + +- **Small configuration**: Typically supports about **15 servers**, **50 locations**, and **30 upstreams/backends**. Each instance generates **3,439 metrics per minute**. 
+- **Medium configuration**: Usually includes about **50 servers**, **200 locations**, and **200 upstreams/backends**. Each instance generates **16,766 metrics per minute**. +- **Generic Large configuration**: Handles up to **100 servers**, **1,000 locations**, and **900 upstreams/backends**. In **NGINX Plus**, each instance generates **59,484 metrics per minute**. + +#### Storage requirements for NGINX Plus + +The table below provides storage estimates for **NGINX Plus** based on configuration size, number of instances, and a 14-day data retention period. Larger configurations and longer retention periods will require proportionally more storage. + +{{}} +| Config Size | Instances | Retention (days) | Estimated Disk Usage (NGINX Plus) | +|---------------------|-----------|------------------|-----------------------------------| +| **Small Size** | 10 | 14 | 5 GiB | +| | 50 | 14 | 25 GiB | +| | 100 | 14 | 45 GiB | +| | 1000 | 14 | 450 GiB | +| **Medium Size** | 10 | 14 | 25 GiB | +| | 50 | 14 | 126 GiB | +| | 100 | 14 | 251 GiB | +| | 500 | 14 | 1.157 TiB | +| **Generic Large Size** | 10 | 14 | 100 GiB | +| | 50 | 14 | 426 GiB | +| | 100 | 14 | 850 GiB | +| | 250 | 14 | 2 TiB | +{{}} + +{{}}MiB (mebibyte), GiB (gibibyte), and TiB (tebibyte) are units of data storage. MiB equals 1,024^2 (2^20) bytes, GiB equals 1,024^3 (2^30) bytes, and TiB equals 1,024^4 (2^40) bytes. These are often used in computing to represent binary data storage capacities, as opposed to MB (megabyte), GB (gigabyte), and TB (terabyte), which use decimal units.{{}} + +#### Storage requirements for NGINX OSS + +**NGINX OSS** collects fewer metrics per instance compared to NGINX Plus. This is because NGINX OSS lacks the advanced features of NGINX Plus, such as the NGINX Plus API, which limits the amount of operational data collected and stored. 
For example, in the **Generic Large configuration**, NGINX OSS generates only **167 metrics per minute per instance**, compared to **59,484 metrics per minute** in NGINX Plus. + +The table below shows the estimated storage requirements for **NGINX OSS**, based on the number of instances and a 14-day retention period. + +{{}} +| Config Size | Instances | Retention (days) | Estimated Disk Usage (NGINX OSS) | +|-----------------------|-----------|------------------|----------------------------------| +| **Generic Large Size** | 10 | 14 | 200 MiB | +| | 50 | 14 | 850 MiB | +| | 100 | 14 | 1.75 GiB | +| | 250 | 14 | 4 GiB | +{{}} + +## Firewall ports {#firewall} + +NGINX Instance Manager and NGINX Agent use the Unix domain socket by default and proxy through the gateway on port `443`. + +To ensure smooth communication, make sure port 443 is open on any firewalls between NGINX Instance Manager, NGINX Agent, and other systems they need to communicate with. This allows secure HTTPS traffic to pass through. + +## Logging {#logging} + +NGINX Instance Manager stores its log files in `/var/log/nms`. To prevent your system from running out of disk space as logs grow, we recommend either creating a separate partition for logs or enabling [log rotation](http://nginx.org/en/docs/control.html#logs). 
+ +## Supported Browsers {#supported-browsers} + +The NGINX Instance Manager web interface works best on the latest versions of these browsers: + +- [Google Chrome](https://www.google.com/chrome/) +- [Firefox](https://www.mozilla.org/en-US/firefox/new/) +- [Safari](https://support.apple.com/downloads/safari) +- [Microsoft Edge](https://www.microsoft.com/en-us/edge) + +## Support for NGINX App Protect WAF + +{{< include "nim/tech-specs/nim-app-protect-support.md" >}} + +## Security Monitoring Module {#security-monitoring} + + +### Dependencies with NGINX Instance Manager + +#### Control plane requirements + +{{< include "nim/tech-specs/security-management-plane-dependencies.md" >}} + +### Dependencies with NGINX App Protect WAF and NGINX Plus + +#### Data plane requirements + +{{< include "nim/tech-specs/security-data-plane-dependencies.md" >}} + +## NGINX Agent + +#### Data plane requirements + +- **Supported distributions**: The NGINX Agent can run on most environments. For the supported distributions, see the [NGINX Agent Technical Specs](https://docs.nginx.com/nginx-agent/technical-specifications/) guide. diff --git a/content/nim/monitoring/_index.md b/content/nim/monitoring/_index.md new file mode 100644 index 000000000..d90569edb --- /dev/null +++ b/content/nim/monitoring/_index.md @@ -0,0 +1,5 @@ +--- +title: Monitoring +weight: 60 +url: /nginx-instance-manager/monitoring/ +--- \ No newline at end of file diff --git a/content/nim/monitoring/catalogs/_index.md b/content/nim/monitoring/catalogs/_index.md new file mode 100644 index 000000000..335c611cc --- /dev/null +++ b/content/nim/monitoring/catalogs/_index.md @@ -0,0 +1,13 @@ +--- +aliases: +- /analytics/dimensions/reference/ +- /analytics/metrics/reference/ +description: This section includes reference catalogs that describe the dimensions and metrics collected by the NGINX Agent. 
+menu: + docs: + parent: Analytics + weight: 20 +title: Catalogs +weight: 400 +url: /nginx-instance-manager/monitoring/catalogs/ +--- diff --git a/content/nim/monitoring/catalogs/dimensions.md b/content/nim/monitoring/catalogs/dimensions.md new file mode 100644 index 000000000..eedcf182d --- /dev/null +++ b/content/nim/monitoring/catalogs/dimensions.md @@ -0,0 +1,15 @@ +--- +catalog: true +description: Learn about the Dimensions collected by NGINX Agent +docs: DOCS-812 +doctypes: +- reference +tags: +- docs +title: Dimensions Catalog +toc: true +catalogType: nms.catalogs.dimensions +weight: 20 +--- + +{{< catalogs-dimensions >}} diff --git a/content/nim/monitoring/catalogs/events.md b/content/nim/monitoring/catalogs/events.md new file mode 100644 index 000000000..726a5e3b1 --- /dev/null +++ b/content/nim/monitoring/catalogs/events.md @@ -0,0 +1,15 @@ +--- +catalog: true +description: Information about all of the Events collected by NGINX Controller Agent +docs: DOCS-1132 +doctypes: +- reference +tags: +- docs +title: Events Catalog +toc: true +catalogType: nms.catalogs.events +weight: 20 +--- + +{{< catalogs-events >}} diff --git a/content/nim/monitoring/catalogs/metrics.md b/content/nim/monitoring/catalogs/metrics.md new file mode 100644 index 000000000..3ca3f964f --- /dev/null +++ b/content/nim/monitoring/catalogs/metrics.md @@ -0,0 +1,15 @@ +--- +catalog: true +description: Information about all of the Metrics collected by NGINX Agent +docs: DOCS-813 +doctypes: +- reference +tags: +- docs +title: Metrics Catalog +toc: true +catalogType: nms.catalogs.metrics +weight: 20 +--- + +{{< catalogs-metrics >}} diff --git a/content/nim/monitoring/count-nginx-plus-instances.md b/content/nim/monitoring/count-nginx-plus-instances.md new file mode 100644 index 000000000..cf31404a8 --- /dev/null +++ b/content/nim/monitoring/count-nginx-plus-instances.md @@ -0,0 +1,81 @@ +--- +description: Tracking your [NGINX Plus]({{< relref "nginx/" >}}) installations is + straightforward 
with [NGINX Instance Manager]({{< relref "nim/" + >}}). If you're enrolled in a commercial program like the [F5 Flex Consumption Program](https://www.f5.com/products/get-f5/flex-consumption-program), + you'll need to regularly report this data to F5. +docs: DOCS-934 +doctypes: +- task +tags: +- docs +title: Tracking NGINX Plus installations for compliance +toc: true +weight: 1000 +draft: true +--- + +## Overview + +Tracking your [NGINX Plus]({{< relref "nginx/" >}}) installations is + straightforward with [NGINX Instance Manager]({{< relref "nim/" + >}}). If you're enrolled in a commercial program like the [F5 Flex Consumption Program](https://www.f5.com/products/get-f5/flex-consumption-program), + you'll need to regularly report this data to F5. + +{{< include "nginx-plus/usage-tracking/overview.md" >}} + +## Prerequisites + +### Install F5 NGINX Instance Manager on a dedicated host {#install-instance-manager} + +{{< include "nginx-plus/usage-tracking/install-nim.md" >}} + + +## View your NGINX Plus and NGINX App Protect Inventory + +After you've installed NGINX Instance Manager, the next step involves configuring your NGINX Plus data plane to report back. This can be done in two ways. First, you can install NGINX Agent on each instance. Alternatively, for an agentless approach, you can set up HTTP Health Checks, which don't require additional installations. Both methods enable your instances to communicate with Instance Manager. 
+ +### Set up instance reporting for NGINX Plus {#set-up-reporting} + +Select the tab that matches your preferred method for setting up reporting: + +- Configure native usage reporting (since NGINX Plus [Release 31]({{< relref "/nginx/releases.md#nginxplusrelease-31-r31" >}})) +- Install NGINX Agent +- Configure HTTP Health Check for NGINX Plus without NGINX Agent + +{{}} + +{{%tab name="Native Usage Reporting"%}} + +{{< include "nginx-plus/usage-tracking/agentless-reporting.md" >}} + +{{%/tab%}} + +{{%tab name="NGINX Agent"%}} + +{{< include "nginx-plus/usage-tracking/install-nginx-agent.md" >}} + +{{%/tab%}} + +{{%tab name="HTTP Health Check"%}} + +{{< include "nginx-plus/usage-tracking/http-health-check.md" >}} + +{{%/tab%}} + +{{}} + +### Reporting your NGINX Plus inventory to F5 {#view-nginx-plus-usage} + +{{< include "nginx-plus/usage-tracking/view-nginx-plus-count.md" >}} + +## View your NGINX Ingress Controller instances and nodes + +You can set up your Kubernetes-based NGINX Plus products, including [NGINX Ingress Controller](https://www.nginx.com/products/nginx-ingress-controller/) and [Connectivity Stack for Kubernetes](https://www.nginx.com/solutions/kubernetes/), to report usage data to NGINX Instance Manager. + +### Set up usage reporting for NGINX Ingress Controller + +Follow the instructions in the [Enabling Usage Reporting](https://docs.nginx.com/nginx-ingress-controller/usage-reporting/) guide to enable usage reporting for NGINX Ingress Controller. + +### Reporting your NGINX Ingress Controller clusters to F5 + +{{< include "nginx-plus/usage-tracking/get-list-k8s-deployments.md" >}} diff --git a/content/nim/monitoring/metrics-api.md b/content/nim/monitoring/metrics-api.md new file mode 100644 index 000000000..8ef474d6a --- /dev/null +++ b/content/nim/monitoring/metrics-api.md @@ -0,0 +1,353 @@ +--- +description: Tips and tricks for using the Metrics API query parameters to refine + your data. 
+docs: DOCS-825 +doctypes: +- tutorial +tags: +- docs +title: Query the Metrics API +toc: true +weight: 200 +--- + +## Overview + +You can use the Analytics module to monitor your NGINX instances and evaluate your applications' performance. The Metrics API query parameters let you fine-tune your system data based on parameters such as time window, aggregation, time resolution, and filter. + +By using different combinations of these query parameters, you can gather information that lets you: + +- Identify system health -- query for various system metrics such as CPU, Memory to get the current state of your system +- Identify traffic behavior -- query for the HTTP / Stream Requests handled by an instance. +- Monitor your application performance -- filter on HTTP response codes to track the number of successful or failed requests + +## Usage + +You can use the Metrics API to query for desired metric names and fine-tune the data returned based on the following parameters: + +- time window (`startTime` and `endTime`) +- `filter` +- `dimensions` +- `resolution` +- `groupBy` + +## Authentication + +You can use basic authentication or JWT authentication to access the NGINX Instance Manager REST API, as described in the [NGINX Instance Manager API Overview]({{< relref "/nim/fundamentals/api-overview#authentication" >}}). + +The examples in this guide demonstrate using a "bearer" token for authentication. The token is sent using the "Authorization" request header field and "Bearer" schema. + +### Understanding the Metrics API Response + +The Metrics API response consists of query metadata and an array of `metrics` -- one array element for each queried metric. + +- The **metric** object includes the queried metric name and an array of data series associated with the metric. +- The **series** object groups metrics data according to dimension values. The series consists of dimensions (key-value map), timestamps, and the timestamps' metric values. 
+ +```json +{ + "metrics":[ + { + "name":"http.request.count", + "series":[ + { + "dimensions":{ + "instance":"instance-name-1", + "nginx_id":"nginx-id-1" + }, + "timestamps":[ + "2020-12-10T12:00:00Z" + ], + "values":[ + 1000 + ] + }, + { + "dimensions":{ + "instance":"instance-name-2", + "nginx_id":"nginx-id-2" + }, + "timestamps":[ + "2020-07-01T12:00:00Z" + ], + "values":[ + 2000 + ] + } + ] + } + ] +} +``` + +In the preceding example, there are two data series for the queried metric. The differentiator between the two series is the "nginx_id" value. This value is what makes NGINX metrics instance centric: you can easily distinguish metrics based on their dimensions' values, such as an Instance, NGINX ID, or System ID. + +You can view the full list of the supported metrics and dimensions, with detailed descriptions, by querying the Catalog API: + +```shell +curl -X GET --url "/api/platform/v1/analytics/catalogs/metrics" -H "Authorization: Bearer " +``` + +Likewise, you can get a full list of the available dimensions by querying the Catalogs API: + +```shell +curl -X GET --url "/api/platform/v1/analytics/catalogs/dimensions" -H "Authorization: Bearer " +``` + +This information is also provided in the [Catalogs Reference]({{< relref "/nms/reference/catalogs//_index.md" >}}). + +### Querying the Metrics API + +This section provides an overview of each query parameter and examples of using the parameters together to refine your data. + +The examples progress from basic usage to more advanced API queries. + +#### Names + +The `names` parameter is the only required parameter in the Metrics API. + +The following example query returns a response with the last recorded value for the queried metric: `nginx.http.request.count`: + +```shell +curl -X GET --url "/api/platform/v1/analytics/metrics?names=nginx.http.request.count" -H "Authorization: Bearer " +``` + +If the dimension values differ, the `series` array in the response will contain multiple items. 
+
+It is possible to query the API for several metrics simultaneously. To do so, provide the metric names as a comma-separated list:
+
+```shell
+curl -X GET --url "/api/platform/v1/analytics/metrics?names=nginx.http.request.count,nginx.http.conn.accepted" -H "Authorization: Bearer "
+```
+
+#### Time Window
+
+To get more than the last recorded value for the queried metric, use the following time window parameters:
+
+- `startTime` indicates the start of the time window to include metrics from (inclusive).
+- `endTime` indicates the end of the time window to include metrics from (non-inclusive).
+
+There are a few rules to remember when working with time window parameters:
+
+- If you provide an `endTime`, you must also provide a `startTime`;
+- `endTime` must be greater than `startTime`;
+- If you give a `startTime` but don't give an `endTime`, the `endTime` defaults to the current time.
+
+You can define time using the `ISO 8601` format (for example, `2020-07-14T13:07:11Z`) or as an offset. An offset is a string that starts with `+` or `-`, followed by a number and a unit of time: `y`, `M`, `w`, `d`, `h`, `m`, or `s`. You can also use `now` to indicate the current timestamp.
+
+The following example request returns all the recorded metric values for the last 12 hours.
+
+```shell
+curl -X GET --url "/api/platform/v1/analytics/metrics?names=nginx.http.request.count&startTime=now-12h" -H "Authorization: Bearer "
+```
+
+The following example query contains a fully defined time window:
+
+```shell
+curl -X GET --url "/api/platform/v1/analytics/metrics?names=nginx.http.request.count&startTime=2020-07-01T05:00:00Z&endTime=2020-07-01T09:00:00Z" -H "Authorization: Bearer "
+```
+
+In this case, the response contains metrics from 05:00:00 to 09:00:00 on the 1st of July 2020.
+
+#### Aggregations
+
+Using only `names` and time window parameters will give you the raw data points of metrics values.
+
+To get a more organized response, you can provide an aggregate function for each queried metric: `AVG`, `SUM`, `COUNT`, `MAX`, `MIN`, or `RATE`.
+
+{{< note >}}
+In the following definitions, `time period` refers to the `resolution` (if provided) or the difference between the `endTime` and `startTime` (when `resolution` is not provided).
+{{< /note >}}
+
+- `AVG` - calculates the average value of the metric data samples over the period
+- `SUM` - calculates the total value of the metric data samples over the period
+- `COUNT` - returns the number of collected data samples of the metric over the period
+- `MIN`/`MAX` - returns the minimal/maximal data sample of the metric from the given period
+- `RATE` - returns an average value of the metric calculated per second (always *per second*, regardless of the provided `resolution`), based on the data available in the given period
+
+{{< note >}}
+You must define a `startTime` when using aggregate functions.
+{{< /note >}}
+
+{{< see-also >}}
+The list of supported aggregate functions for any particular metric is available in the [Metrics Catalog]({{< relref "/nms/reference/catalogs/metrics.md" >}}).
+{{< /see-also >}}
+
+For example, the following query returns a single value (per dimension set), which is the sum of the metric values for the last 12 hours. To get proper values, ensure that the `endTime` is greater than the `startTime`.
+
+```shell
+curl -X GET --url "/api/platform/v1/analytics/metrics?names=SUM(nginx.http.request.count)&startTime=now-12h" -H "Authorization: Bearer "
+```
+
+It is possible to use aggregated and non-aggregated metrics in a single query. For this query, the Metrics API returns a single value per dimension set. That value is the sum of all of the metric's values for the last 12 hours.
+
+For example:
+
+```shell
+curl -X GET --url "/api/platform/v1/analytics/metrics?names=SUM(nginx.http.request.count),nginx.http.conn.accepted&startTime=now-12h" -H "Authorization: Bearer "
+```
+
+#### Resolution
+
+If you want to change the returned data's granularity, you can use the `resolution` parameter. This parameter must be used in conjunction with an aggregation function and a time window (at least `startTime` must be provided).
+
+The `resolution` parameter must be a valid duration. The duration is a string that starts with a number, followed by a unit of time: `y`, `M`, `w`, `d`, `h`, `m`, or `s`.
+
+The following example query returns 12 aggregated metric values. Here, we're asking for the data from the last 12 hours with one-hour granularity:
+
+```shell
+curl -X GET --url "/api/platform/v1/analytics/metrics?names=SUM(nginx.http.request.count)&startTime=now-12h&resolution=1h" -H "Authorization: Bearer "
+```
+
+#### Filter
+
+This parameter, as the name indicates, filters results based on the value of dimensions. Filtering by dimension value can help to refine the data that's returned into a more specific set.
+
+The `filter` query consists of one or more predicates in the form of `<dimension><operator><value>`, where:
+
+- `<dimension>` is the name of the dimension;
+- `<operator>` is one of the supported operators (`=`, `!=`, `<`, `<=`, `>=`, `>`, `in` or `not`);
+- `<value>` is the value of the dimension(s) that you want to filter on.
+
+For example, the following query includes a simple filter on the NGINX ID. The query returns data for the instance with NGINX ID `nginx_id1` for the last 12 hours.
+
+```shell
+curl -X GET --url "/api/platform/v1/analytics/metrics?names=nginx.http.request.count&filter=nginx_id='nginx_id1'&startTime=now-12h" -H "Authorization: Bearer "
+```
+
+{{< tip >}}
+
+- Predicates can be combined into logical expressions using `OR`, `AND`, and `(` `)`.
+- For matching values, wildcard (`*`) use is supported.
+- We recommend wrapping predicates in single quotes to ensure that the full query string is processed correctly.
+
+{{< /tip >}}
+
+The following example request uses `filter` with logical expressions:
+
+```shell
+curl -X GET --url "/api/platform/v1/analytics/metrics?names=nginx.http.request.count&filter=nginx_id='nginx_id1*' and server_zone='zone1'&startTime=now-12h" -H "Authorization: Bearer "
+```
+
+#### GroupBy
+
+Using filters and aggregation functions may not be enough to allow you to get comprehensive information about a specific application or environment.
+
+The `groupBy` parameter helps to gather results according to the specified dimension(s). You can provide multiple dimension names as a comma-separated list.
+
+{{< note >}}
+
+- When using `groupBy`, you must use an aggregate function and a time window (`startTime` must be defined; `endTime` is optional).
+- If a request contains aggregated and non-aggregated metrics, the `groupBy` parameter will apply only to the aggregated metrics.
+
+{{< /note >}}
+
+For example, the following query returns data grouped by `nginx_id` for the last 12 hours.
+
+```shell
+curl -X GET --url "/api/platform/v1/analytics/metrics?names=SUM(nginx.http.request.count)&groupBy=nginx_id&startTime=now-12h" -H "Authorization: Bearer "
+```
+
+The API response for the query looks similar to the following:
+
+```json
+{
+  "metrics":[
+    {
+      "aggr": "SUM",
+      "name":"nginx.http.request.count",
+      "series":[
+        {
+          "dimensions":{
+            "nginx_id":"nginx-id-1"
+          },
+          "timestamps":[
+            "2020-12-13T12:00:00Z"
+          ],
+          "values":[
+            1000
+          ]
+        },
+        {
+          "dimensions":{
+            "nginx_id":"nginx-id-2"
+          },
+          "timestamps":[
+            "2020-12-13T12:00:00Z"
+          ],
+          "values":[
+            2000
+          ]
+        }
+      ]
+    }
+  ]
+}
+```
+
+The API returns the data for the last 12 hours grouped by the `nginx_id` dimension. Unlike other queries, the API only returns those dimensions that have been selected in `groupBy`. However, the series of different dimension values are still distinguished.
+ +#### Dimensions + +You can use the `dimensions` query parameter to specify which dimension(s) should be included in each metric series' response. + +Dimensions not specified in the query parameter will not be included in the response. This may result in some series having the same dimension set but being returned as separate list items. + +The following example returns results for the specified metric, where `dimensions=nginx_id`: + +```shell +curl -X GET --url "/api/platform/v1/analytics/metrics?names=SUM(nginx.http.request.count)&dimensions=nginx_id&startTime=now-12h" -H "Authorization: Bearer " +``` + +```json +{ + "metrics":[ + { + "aggr": "SUM", + "name":"nginx.http.request.count", + "series":[ + { + "dimensions":{ + "nginx_id":"nginx-id-1" + }, + "timestamps":[ + "2020-12-13T12:00:00Z" + ], + "values":[ + 1000 + ] + }, + { + "dimensions":{ + "nginx_id":"nginx-id-2" + }, + "timestamps":[ + "2020-12-13T12:00:00Z" + ], + "values":[ + 2000 + ] + } + ] + } + ] +} +``` + +If `dimensions` and `groupBy` parameters are both used, the list of provided `dimensions` must be a subset of the list provided in `groupBy`. + +The following example uses `dimensions` with `groupBy`: + +```shell +curl -X GET --url "/api/platform/v1/analytics/metrics?names=SUM(nginx.http.request.count)&groupBy=nginx_id&dimensions=system_id&startTime=now-12h&resolution=5m" -H "Authorization: Bearer " +``` + +The `dimensions` parameter also lets you omit the dimensions from the response altogether. To do so, define `dimensions` as an empty list (`dimensions=`). + +This results in several data series for the `http.request.count` metric without any dimensions being visible. That is not useful on its own; however, if you combine the empty `dimensions` parameter with metric aggregation, you will receive a single series with aggregated values. 
+
+For example, the following query sums all the values in all of the series of the `http.request.count` metric for the past 12 hours using the default `resolution`.
+
+```shell
+curl -X GET --url "/api/platform/v1/analytics/metrics?names=SUM(nginx.http.request.count)&startTime=now-12h&dimensions=" -H "Authorization: Bearer "
+```
diff --git a/content/nim/monitoring/overview-metrics.md b/content/nim/monitoring/overview-metrics.md
new file mode 100644
index 000000000..d8c968f76
--- /dev/null
+++ b/content/nim/monitoring/overview-metrics.md
@@ -0,0 +1,81 @@
+---
+description: Understanding how the NGINX Agent collects and reports metrics
+docs: DOCS-826
+doctypes:
+- reference
+tags:
+- docs
+title: "Overview: NGINX instance metrics"
+toc: true
+weight: 100
+---
+
+## Overview
+
+The data that NGINX Instance Manager collects can be divided into two categories:
+
+- **System metrics**: Data collected about the data plane system, such as CPU and memory usage.
+- **Traffic metrics**: Data related to processed traffic from sources such as NGINX OSS, NGINX Plus, or NGINX logs.
+
+Metrics are collected every 15 seconds and are published at 60-second intervals.
+
+For the full list of metrics, see the [Metrics Catalog Reference]({{< relref "/nms/reference/catalogs/metrics.md" >}}).
+
+## Metrics Collection and Reporting Process
+
+While the NGINX Agent is running on the host, it collects metrics at regular 15-second intervals. Metrics are then downsampled and sent to the Manager server once per minute.
+
+NGINX Instance Manager stores historical metrics data in an analytics database. Metrics are aggregated and rolled-up as follows:
+
+- Data not older than 8 days are stored with best possible resolution (usually 1 min).
+- Data older than 8 days but not older than 30 days are stored with 5 min resolution.
+- Data older than 30 days but not older than 15 months are stored with 1 hour resolution.
+- Data older than 15 months are stored with 1 day resolution.
+ +### F5 NGINX Plus Metrics + +Enable the NGINX Plus API to collect NGINX Plus metrics by uncommenting the `/api/` location section in `/etc/nginx/conf.d/default.conf`: + +```nginx {hl_lines=[4]} +# enable /api/ location with appropriate access control in order +# to make use of NGINX Plus API +# +location /api/ { + api write=on; + allow 127.0.0.1; + deny all; +} +``` + +### NGINX OSS Metrics + +Enable NGINX Stub Status API to collect NGINX metrics in NGINX OSS. A sample Stub Status API configuration is shown below: + +```nginx +server { + listen 127.0.0.1:8080; + location /api { + stub_status; + allow 127.0.0.1; + deny all; + } +} +``` + +### NGINX Access Log Metrics + +Enable NGINX Access Logging to collect metrics from parsing access logs. A sample Access Log format is shown below: + +```nginx +log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for" ' + '"$bytes_sent" "$request_length" "$request_time" ' + '"$gzip_ratio" $server_protocol '; + +access_log /var/log/nginx/access.log main; +``` + +## Troubleshooting + +System metrics are collected by the NGINX Agent without requiring the user to perform any additional setup. Additional setup is required to enable collection of NGINX related metrics. 
diff --git a/content/nim/monitoring/security-monitoring/_index.md b/content/nim/monitoring/security-monitoring/_index.md new file mode 100644 index 000000000..e5d05ad14 --- /dev/null +++ b/content/nim/monitoring/security-monitoring/_index.md @@ -0,0 +1,5 @@ +--- +title: Security Monitoring +weight: 10000 +url: /nginx-instance-manager/monitoring/security-monitoring/ +--- diff --git a/content/nim/monitoring/security-monitoring/configure/_index.md b/content/nim/monitoring/security-monitoring/configure/_index.md new file mode 100644 index 000000000..0abd112a1 --- /dev/null +++ b/content/nim/monitoring/security-monitoring/configure/_index.md @@ -0,0 +1,6 @@ +--- +title: Configure +description: +weight: 100 +url: /nginx-instance-manager/monitoring/security-monitoring/configure/ +--- \ No newline at end of file diff --git a/content/nim/monitoring/security-monitoring/configure/create-role-security-monitoring.md b/content/nim/monitoring/security-monitoring/configure/create-role-security-monitoring.md new file mode 100644 index 000000000..b010c215d --- /dev/null +++ b/content/nim/monitoring/security-monitoring/configure/create-role-security-monitoring.md @@ -0,0 +1,62 @@ +--- +description: Learn how to grant users access to the F5 NGINX Security Monitoring dashboards. +docs: DOCS-1026 +doctypes: +- task +tags: +- docs +title: Give Users Access to Security Monitoring Dashboards +toc: true +weight: 200 +--- + +{{< shortversions "1.0.0" "latest" "secvers" >}} + +## Overview + +You can use F5 NGINX Security Monitoring to monitor NGINX App Protect WAF instances. The Security Monitoring analytics dashboards and security logs provide protection insights and help you analyze possible threats or identify opportunities to tune your security policies. + +By completing the steps in this topic, you will create a role that gives users access to the Security Monitoring module and logs, and assign it to user accounts or groups. 
+ +{{}}The recommendations in this guide follow the principle of least privilege and do not grant users access to NGINX Instance Manager. You can create additional roles with custom modules, features, and permissions to suit your use case.{{}} + +## Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- NGINX Security Monitoring is [installed]({{< relref "/nim/monitoring/security-monitoring/deploy/install-security-monitoring.md" >}}) and running. +- Your user account needs to be able to access the User Management settings in NGINX Instance Manager. + The minimum required role permissions are: + + - **Module**: Settings + - **Feature**: User Management + - **Access**: `READ`, `CREATE`, `UPDATE` + +- Review the table below to determine the minimum permissions needed for your use case. + + {{}} + + | Module(s) | Feature(s) | Access | Description | + |-------|--------|----|--------| + | Instance Manager
    Security Monitoring | Analytics
    Security Monitoring | READ
    READ | Read-only access that allows users to view the Security Monitoring dashboards. Users cannot access NGINX Instance Manager or Settings.| + | Instance Manager
    Security Monitoring
    Settings | Analytics
    Security Monitoring
    User Management | READ
    READ
    CREATE, READ, UPDATE| Allows users to view the Security Monitoring dashboards and manage user accounts and roles.

    {{< fa "lightbulb" >}} Recommended for a "super-user" who is responsible for managing other users' access to the security dashboards. This permission set does not allow the user to delete user accounts.| + + + {{
    }} + + +## Create a Role + +{{< include "nim/rbac/create-roles.md" >}} + +## Assign the Role + +After you've created a role for Security Monitoring, assign the role to one or more users or to a user group. + +### Assign the Role to Users + +{{< include "nim/rbac/assign-roles-to-users.md" >}} + +### Assign the Role to User Groups + +{{< include "nim/rbac/assign-roles-to-user-groups.md" >}} diff --git a/content/nim/monitoring/security-monitoring/configure/set-up-app-protect-instances.md b/content/nim/monitoring/security-monitoring/configure/set-up-app-protect-instances.md new file mode 100644 index 000000000..0f82488ea --- /dev/null +++ b/content/nim/monitoring/security-monitoring/configure/set-up-app-protect-instances.md @@ -0,0 +1,242 @@ +--- +description: Learn how to set up F5 NGINX App Protect data plane instances for use with + the NGINX Security Monitoring and NGINX Instance Manager. +docs: DOCS-1107 +doctypes: +- task +tags: +- docs +title: Set Up App Protect Instances for Security Monitoring +toc: true +weight: 100 +--- + +## Overview + +F5 NGINX Security Monitoring supports the following use cases: + +- **Security Monitoring only**: Use only the Security Monitoring module to monitor data from NGINX App Protect WAF instances. You will be able to review the security dashboards to assess potential threats and identify opportunities to fine-tune your policies. Your NGINX App Protect WAF configurations are managed outside of the NGINX Instance Manager context. +- **Security Monitoring and Instance Manager**: Use the Security Monitoring module with the NGINX Instance Manager. In addition to monitoring your application security, you will be able to manage your NGINX App Protect WAF configurations and security policies in a single location and push pre-compiled updates to an instance or instance group. + +### Before You Begin + +Complete the following prerequisites before proceeding with the steps in this guide. + +1. 
If you are new to NGINX App Protect WAF, follow the instructions in the installation and configuration guides to get up and running: + + - [Install NGINX App Protect WAF](https://docs.nginx.com/nginx-app-protect/admin-guide/install/) on one or more data plane instances. Each data plane instance must have connectivity to the NGINX Instance Manager host. + - [Configure NGINX App Protect WAF](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#policy-configuration-overview) according to your needs on each of the data plane instance. + +1. Review the dependencies with NGINX App Protect WAF and NGINX Plus. + + {{< include "nim/tech-specs/security-data-plane-dependencies.md" >}} + +1. Determine your use case: **Security Monitoring only** or **Security Monitoring and Configuration Management**. +1. [Install the NGINX Security Monitoring module]({{< relref "/nim/monitoring/security-monitoring/deploy/install-security-monitoring.md" >}}) and [upload your license]({{< relref "/nim/admin-guide/license/add-license.md" >}}). + + +## Install NGINX Agent {#agent-config} + +NGINX Agent is a companion daemon for NGINX Open Source or NGINX Plus instance that provides: + +- Remote management of NGINX configurations +- Collection and reporting of real-time NGINX performance and operating system metrics +- Notifications of NGINX events + +Repeat the steps in this section on each NGINX App Protect WAF data plane host to install and configure NGINX Agent for use with Security Monitoring. **These settings apply to both of the Security Monitoring use cases.** + +1. Use SSH to connect to the data plane host. +1. Install the NGINX Agent package from the NGINX Instance Manager host. + + {{< include "agent/installation/install-agent-api.md" >}} + +1. Edit the `/etc/nginx-agent/nginx-agent.conf` file to add the `nap_monitoring` configuration. + + Add the lines below to the end of the file. 
This enables NGINX Agent to send NGINX App Protect messages to the NGINX Instance Manager management plane. + + ```yaml + dataplane: + status: + # poll interval for data plane status - the frequency the NGINX Agent will query the data plane for changes + poll_interval: 30s + # report interval for data plane status - the maximum duration to wait before syncing data plane information if no updates have been observed + report_interval: 24h + events: + # report data plane events back to the management plane + enable: true + metrics: + # specify the size of a buffer to build before sending metrics + bulk_size: 20 + # specify metrics poll interval + report_interval: 1m + collection_interval: 15s + mode: aggregated + + # OSS NGINX default config path + # path to aux file dirs can also be added + config_dirs: "/etc/nginx:/usr/local/etc/nginx:/usr/share/nginx/modules:/etc/nms:/etc/app_protect" + + # Enable reporting NGINX App Protect details to the management plane. + extensions: + - nginx-app-protect + - nap-monitoring + + # Enable reporting NGINX App Protect details to the control plane. + nginx_app_protect: + # Report interval for NGINX App Protect details - the frequency the NGINX Agent checks NGINX App Protect for changes. + report_interval: 15s + # Enable precompiled publication from the NGINX Instance Manager (true) or perform compilation on the data plane host (false). + precompiled_publication: true + + # NGINX App Protect Monitoring config + nap_monitoring: + # Buffer size for collector. Will contain log lines and parsed log lines + collector_buffer_size: 50000 + # Buffer size for processor. Will contain log lines and parsed log lines + processor_buffer_size: 50000 + # Syslog server IP address the collector will be listening to + syslog_ip: "127.0.0.1" + # Syslog server port the collector will be listening to + syslog_port: 514 + ``` + +1. 
If the `location /api` directive has not been set up in the `nginx.conf` file, follow the example below to add it: + + ```nginx + server{ + location /api { + api write=on; + allow 127.0.0.1; + deny all; + } + } + ``` + + After adding the directive, restart NGINX to apply the changes: + + ```bash + sudo systemctl restart nginx + ``` + + {{}}You can change the values of `syslog_ip` and `syslog_port` to meet your needs. + You must use the same values when configuring logging for the Security Monitoring module. If the `syslog:` configuration does not match these settings, the monitoring dashboards will not display any data. Also, the networking changes for NGINX App Protect Version 5 preclude the use of `127.0.0.1` as a syslog server address. For Version 5, the address of the `docker0` interface (typically `192.0.10.1`) or the IP address of the data plane host can be used for the syslog server address.{{}} + + {{}}You can use the NGINX Agent installation script to add the fields for `nginx_app_protect` and `nap_monitoring`: + +```bash +# Download install script via API +curl https:///install/nginx-agent > install.sh + +# Use the flag --nap-monitoring to set the child fields for the field 'nap_monitoring', the +# child field values will be set to the values in the example configuration from above. Specify +# the -m | --nginx-app-protect-mode flag to set up management of NGINX App Protect on the instance. +# In the example below we specify 'precompiled-publication' for the flag value which will make the +# config field 'precompiled_publication' set to 'true', if you would like to set the config field +# 'precompiled_publication' to 'false' you can specify 'none' as the flag value. +sudo sh ./install.sh --nap-monitoring true --nginx-app-protect-mode precompiled-publication +``` + + {{}} + +1. 
Restart NGINX Agent: + + ``` bash + sudo systemctl restart nginx-agent + ``` + +## Set Up Instances for Security Monitoring Only {#monitor-only} + +Complete the steps in this section if you are only using the Security Monitoring module to monitor your application security. In this use case, you are **not using Instance Manager** to manage your WAF security policies. + +Repeat the steps below on each NGINX App Protect WAF data plane instance. + +1. Use SSH to connect to the data plane host. + +1. Create a new log format definition file with the name `/etc/app_protect/conf/log_sm.json` and the contents shown below. + This defines the log format for the Security Monitoring module. + + This configuration sets the maximum accepted request payload to 2048 bytes and the maximum message size to 5k. The latter setting truncates messages larger than 5k. +2. Add character escaping for the used separator `,` to be escaped with its standard URL encoding `%2C`. + + ``` json + { + "filter": { + "request_type": "illegal" + }, + "content": { + "format": "user-defined", + "format_string": "%blocking_exception_reason%,%dest_port%,%ip_client%,%is_truncated_bool%,%method%,%policy_name%,%protocol%,%request_status%,%response_code%,%severity%,%sig_cves%,%sig_set_names%,%src_port%,%sub_violations%,%support_id%,%threat_campaign_names%,%violation_rating%,%vs_name%,%x_forwarded_for_header_value%,%outcome%,%outcome_reason%,%violations%,%violation_details%,%bot_signature_name%,%bot_category%,%bot_anomalies%,%enforced_bot_anomalies%,%client_class%,%client_application%,%client_application_version%,%transport_protocol%,%uri%,%request%", + "escaping_characters": [ + { + "from": ",", + "to": "%2C" + } + ], + "max_request_size": "2048", + "max_message_size": "5k", + "list_delimiter": "::" + } + } + ``` + +1. Find the context in your NGINX configuration where NGINX App Protect WAF logging is enabled. 
+ In the same context, add the `app_protect_security_log` directive shown in the example below to configure attack data logging for use with the Security Monitoring dashboards. + + ```nginx + app_protect_security_log_enable on; + app_protect_security_log "/etc/app_protect/conf/log_sm.json" syslog:server=127.0.0.1:514; + ``` + + {{}}The `syslog:server=:` must match the `syslog_ip` and `syslog_port` values specified in the [NGINX Agent configuration file](#agent-config). The dashboards won't display any data if these settings don't match. Also, the networking changes for NGINX App Protect Version 5 preclude the use of `127.0.0.1` as a syslog server address. For Version 5, the address of the `docker0` interface (typically `192.0.10.1`) or the IP address of the data plane host can be used for the syslog server address.{{}} + +1. Restart NGINX Agent and the NGINX web server. + + ```bash + sudo systemctl restart nginx-agent + sudo systemctl restart nginx + ``` + +You should now be able to view data from your NGINX App Protect instances in the NGINX Security Monitoring dashboards. + +## Set up Instances for Security Monitoring with Instance Manager {#monitor-and-manage} + +Complete the steps in this section if you want to use the Security Monitoring module **and** Instance Manager. In this use case, you will use NGINX Instance Manager to monitor threats and to manage your NGINX App Protect WAF configurations and security policies. + +Take the steps below to update your NGINX App Protect WAF configurations by using Instance Manager. + +1. Log in to the NGINX Instance Manager user interface and go to **Modules** > **Instance Manager**. +1. Select **Instances** or **Instance Groups**, as appropriate. +1. Select **Edit Config** from the **Actions** menu for the desired instance or instance group. +1. Next, edit the desired configuration file. 
You will add directives that reference the security policies bundle and enable the NGINX App Protect WAF logs required by the Security Monitoring dashboards. An example configuration is provided below.
+
+    ```nginx
+    app_protect_enable on;
+    app_protect_policy_file "/etc/nms/NginxDefaultPolicy.tgz";
+    app_protect_security_log_enable on;
+    app_protect_security_log "/etc/nms/secops_dashboard.tgz" syslog:server=127.0.0.1:514;
+    ```
+
+    - Add the `app_protect_policy_file` directive with a reference to a security policy.
+
+      The policy reference must use the `.tgz` file extension when using Instance Manager to perform precompiled publication of NGINX App Protect WAF policies and log profiles. The file path referenced must exist on the NGINX Instance Manager host, but it's ok if the policy file doesn't exist yet. If your Instance is not configured for precompiled publication, then use the `.json` file extension for policies and log profiles. In this case, the file path referenced in the NGINX configuration must reside on the Instance.
+
+      If you are using custom security policies, at this stage, it's fine to use the default security policy shown in the example above. After completing the steps in this guide, refer to the instructions in [Set Up App Protect WAF Configuration Management]({{< relref "/nim/nginx-app-protect/setup-waf-config-management#add-waf-config" >}}) to add your custom security policy files to NGINX Instance Manager and update your NGINX configuration.
+
+    - Add the `app_protect_security_log_enable on` and the `app_protect_security_log` directive to any NGINX context where NGINX App Protect WAF is enabled and you want to be able to review attack data.
+
+      The logging configuration must reference `"/etc/nms/secops_dashboard.tgz"`, as shown in the example.
+
+      If the `app_protect_security_log_enable` setting is already present, just add the `app_protect_security_log` beneath it in the same context.
+ + {{}}The `syslog:server=:` must match the `syslog_ip` and `syslog_port` values specified in the [NGINX Agent configuration file](#agent-config). The Security Monitoring dashboards won't display any data if these settings don't match. Also, the networking changes for NGINX App Protect Version 5 preclude the use of `127.0.0.1` as a syslog server address. For Version 5, the address of the `docker0` interface (typically `192.0.10.1`) or the IP address of the data plane host can be used for the syslog server address.{{}} + +1. Select **Publish** to immediately push the configuration file updates out to your NGINX instance or instance group. + +You should now be able to view data from your NGINX App Protect WAF instances in the Security Monitoring dashboard. + +## What's Next + +- [Grant Users Access to the Security Monitoring Dashboards]({{< relref "create-role-security-monitoring" >}}): Follow the steps in this guide to allow other users in your organization to access the Security Monitoring Dashboards. + +- If you are using Security Monitoring with Instance Manager, proceed to the [Set Up App Protect WAF Configuration Management]({{< relref "/nim/nginx-app-protect/setup-waf-config-management" >}}) guide. diff --git a/content/nim/monitoring/security-monitoring/configure/update-geo-db.md b/content/nim/monitoring/security-monitoring/configure/update-geo-db.md new file mode 100644 index 000000000..a2d03eadc --- /dev/null +++ b/content/nim/monitoring/security-monitoring/configure/update-geo-db.md @@ -0,0 +1,49 @@ +--- +description: Learn how to update the Geolocation Database used in F5 NGINX Management + Suite Security Monitoring dashboards. +docs: DOCS-1108 +doctypes: +- task +tags: +- docs +title: Update Geolocation Database used in Security Monitoring Dashboards +toc: true +weight: 200 +--- + +{{< shortversions "1.0.0" "latest" "secvers" >}} + + + +## Overview + +You can use F5 NGINX Security Monitoring to monitor NGINX App Protect WAF instances. 
The Security Monitoring analytics dashboard uses MaxMind's GeoLite2 Free Database to provide extra Geolocation data for Security Violations. + +By completing the steps in this topic, you will be able to update the Security Monitoring module to get the latest Geolocation database such that the dashboards can provide accurate data. + +## Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- NGINX Security Monitoring is [installed]({{< relref "/nim/monitoring/security-monitoring/deploy/install-security-monitoring.md" >}}) and running. +- NGINX App Protect is configured, and the Security Monitoring dashboard is gathering security violations + +## How to update Geolocation Database + +1. Create a [MaxMind](https://dev.maxmind.com/geoip/geolite2-free-geolocation-data/) account and subscribe to get the latest updates to the Geolocation database. +1. Download the GeoLite2 Country (Edition ID: GeoLite2-Country) database in a GeoIP2 Binary `.mmdb` format from the [MaxMind](https://www.maxmind.com/en/accounts/current/geoip/downloads) website. The database will be present in a `gzip` downloaded file. +1. Unzip the downloaded `gzip` file, which contains the binary data of the GeoLite2 Country database with a filename `GeoLite2-Country.mmdb` +1. Replace the `GeoLite2-Country.mmdb` present on your NGINX Instance Manager's Control Plane at `/usr/share/nms/geolite2/GeoLite2-Country.mmdb` with the newly downloaded GeoLite2 Country database. + + Example: + + ```bash + sudo scp /path/to/GeoLite2-Country.mmdb {user}@{host}:/usr/share/nms/geolite2/GeoLite2-Country.mmdb + ``` + +1. 
Restart the NGINX Instance Manager services + + ```bash + sudo systemctl restart nms-ingestion + sudo systemctl restart nms-core + ``` diff --git a/content/nim/monitoring/security-monitoring/configure/update-signatures.md b/content/nim/monitoring/security-monitoring/configure/update-signatures.md new file mode 100644 index 000000000..79d69679f --- /dev/null +++ b/content/nim/monitoring/security-monitoring/configure/update-signatures.md @@ -0,0 +1,54 @@ +--- +description: Learn how to update the Attack Signature Database used in F5 NGINX Management + Suite Security Monitoring dashboards. +docs: DOCS-1109 +doctypes: +- task +tags: +- docs +title: Manage the Security Monitoring Signature Database +toc: true +weight: 200 +--- + +{{< shortversions "1.0.0" "latest" "secvers" >}} + +## Overview + +You can use the F5 NGINX Security Monitoring module to monitor NGINX App Protect WAF instances for security. The Security Monitoring module analytics dashboards utilize a Signature Database to give more detail about the Attack Signatures that have caused a Security Violation, like the Signature's name, accuracy, and risk. If the Signature Database is not updated to match the Attack Signature version used for App Protect WAF protection, new signatures may be triggered without a name or other attributes like risk and accuracy. + +Make sure the dashboards show the right info by following the steps in this topic to update the Security Monitoring module with the newest Attack Signature data. + +## Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- NGINX Security Monitoring is [installed]({{< relref "/nim/monitoring/security-monitoring/deploy/install-security-monitoring.md" >}}) and running +- NGINX App Protect is configured, and the Security Monitoring dashboard is gathering security violations + +## How to Update the Signature Database + +1. Open an SSH connection to the data plane host and log in. +1. 
Use the [Attack Signature Report Tool](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#attack-signature-report-tool) to generate a Signature Report file. The filename must be `signature-report.json`. + + Example: + + ```bash + sudo /opt/app_protect/bin/get-signatures -o ./signature-report.json + ``` + +1. Open an SSH connection to the management plane host and log in. +1. Replace the `signature-report.json` on your NGINX Instance Manager's control plane at `/usr/share/nms/sigdb/signature-report.json` with the newly generated Signature Report. + + Example: + + ```bash + sudo scp /path/to/signature-report.json {user}@{host}:/usr/share/nms/sigdb/signature-report.json + ``` + +1. Restart the NGINX Instance Manager services: + + ```bash + sudo systemctl restart nms-ingestion + sudo systemctl restart nms-core + ``` diff --git a/content/nim/monitoring/security-monitoring/deploy/_index.md b/content/nim/monitoring/security-monitoring/deploy/_index.md new file mode 100644 index 000000000..1e50bc359 --- /dev/null +++ b/content/nim/monitoring/security-monitoring/deploy/_index.md @@ -0,0 +1,6 @@ +--- +title: Deploy +description: +weight: 1 +url: /nginx-instance-manager/monitoring/security-monitoring/deploy/ +--- \ No newline at end of file diff --git a/content/nim/monitoring/security-monitoring/deploy/install-security-monitoring.md b/content/nim/monitoring/security-monitoring/deploy/install-security-monitoring.md new file mode 100644 index 000000000..317824ee1 --- /dev/null +++ b/content/nim/monitoring/security-monitoring/deploy/install-security-monitoring.md @@ -0,0 +1,151 @@ +--- +description: +docs: DOCS-1208 +doctypes: +- tutorial +tags: +- docs +title: "Install or Upgrade Security Monitoring" +toc: true +weight: 40 +--- + +## Overview + +Follow the steps in this guide to install or upgrade or upgrade the NGINX Security Monitoring module. 
+ +## Before You Begin + +### Security Considerations + +{{< include "installation/secure-installation.md" >}} + +### Installation Prerequisites + +{{< include "installation/nms-prerequisites.md" >}} + +### Dependencies with Instance Manager + +{{< include "nim/tech-specs/security-management-plane-dependencies.md" >}} + +--- + +## Install Security Monitoring + +{{}} + +{{%tab name="CentOS, RHEL, RPM-Based"%}} + +1. To install the latest version of the Security Monitoring module, run the following command: + + ```bash + sudo yum -y install nms-sm + ``` + +{{%/tab%}} +{{%tab name="Debian, Ubuntu, Deb-Based"%}} + +1. To install the latest version of the Security Monitoring module, run the following commands: + + ```bash + sudo apt-get update + sudo apt-get install -y nms-sm + ``` + +{{%/tab%}} + +{{}} + +2. Restart the F5 NGINX Instance Manager services: + + ```bash + sudo systemctl restart nms + ``` + + NGINX Instance Manager components started this way run by default as the non-root `nms` user inside the `nms` group, both of which are created during installation. + +3. Restart the NGINX web server: + + ```bash + sudo systemctl restart nginx + ``` + +4. If running Security Monitoring v1.7.0 or higher, start the module: + + ```bash + sudo systemctl start nms-sm + ``` + + +### Accessing the Web Interface + +{{< include "installation/access-web-ui.md" >}} + +### Add License + +A valid license is required to make full use of all the features in Security Monitoring module. + +Refer to the [Add a License]({{< relref "/nim/admin-guide/license/add-license.md" >}}) topic for instructions on how to download and apply a trial license, subscription license, or Flexible Consumption Program license. + +--- + +## Upgrade Security Monitoring {#upgrade-security-monitoring} + +{{}}The upgrade process for Security Monitoring **does not** automatically upgrade Instance Manager, which is a package dependency. 
To ensure compatibility with Security Monitoring, you will need to manually [upgrade Instance Manager]({{< relref "/nim/deploy/vm-bare-metal/install.md#upgrade-nim" >}}) to a version supported by Security Monitoring. For specific version dependencies between Security Monitoring and Instance Manager, refer to the [Security Monitoring release notes]({{< relref "/nim/monitoring/security-monitoring/releases/release-notes.md" >}}).{{}} + +
    + +{{}} +{{%tab name="CentOS, RHEL, RPM-Based"%}} + +1. To upgrade to the latest version of Security Monitoring, run the following command: + + ```bash + sudo yum update -y nms-sm + ``` + +{{%/tab%}} + +{{%tab name="Debian, Ubuntu, Deb-Based"%}} + +1. To upgrade to the latest version of the Security Monitoring, run the following command: + + ```bash + sudo apt-get update + sudo apt-get install -y --only-upgrade nms-sm + ``` + +{{%/tab%}} +{{}} + +2. Restart the NGINX Instance Manager platform services: + + ```bash + sudo systemctl restart nms + ``` + + NGINX Instance Manager components started this way run by default as the non-root `nms` user inside the `nms` group, both of which are created during installation. + +3. Restart the NGINX web server: + + ```bash + sudo systemctl restart nginx + ``` + +4. If running Security Monitoring v1.7.0 or higher, start the module: + + ```bash + sudo systemctl start nms-sm + ``` + +5. (Optional) If you use SELinux, follow the steps in the [Configure SELinux]({{< relref "/nim/system-configuration/configure-selinux.md" >}}) guide to restore the default SELinux labels (`restorecon`) for the files and directories related to NGINX Instance Manager. + +--- + +## What's Next + +### Set Up Data Plane + +To set up your NGINX App Protect WAF data plane instances for use with Security Monitoring, refer to the following instructions: + +- [Set Up App Protect Instances for Security Monitoring]({{< relref "/nim/monitoring/security-monitoring/configure/set-up-app-protect-instances" >}}) diff --git a/content/nim/monitoring/security-monitoring/releases/_index.md b/content/nim/monitoring/security-monitoring/releases/_index.md new file mode 100644 index 000000000..fa3c4c338 --- /dev/null +++ b/content/nim/monitoring/security-monitoring/releases/_index.md @@ -0,0 +1,6 @@ +--- +title: Releases +description: "Stay up-to-date with the latest F5 NGINX Security Monitoring releases." 
+weight: 800 +url: /nginx-instance-manager/monitoring/security-monitoring/releases/ +--- diff --git a/content/nim/monitoring/security-monitoring/releases/known-issues.md b/content/nim/monitoring/security-monitoring/releases/known-issues.md new file mode 100644 index 000000000..9621bd342 --- /dev/null +++ b/content/nim/monitoring/security-monitoring/releases/known-issues.md @@ -0,0 +1,123 @@ +--- +description: This document lists and describes the known issues and possible workarounds + in the F5 NGINX Security Monitoring module. Fixed issues are removed + after **45 days**. +docs: DOCS-1077 +tags: +- docs +title: Known Issues +toc: true +weight: 200 +--- + +{{}} + +{{< tip >}}We recommend you upgrade to the latest version of the Security Monitoring module to take advantage of new features, improvements, and bug fixes.{{< /tip >}} + + +--- + +## 1.7.0 +October 18, 2023 + +### {{% icon-bug %}} Web interface fails to load after restarting NGINX Instance Manager {#44587} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44587 | Open | + +{{}} +#### Description +The NGINX Instance Manager web interface can fail to load with a "Page not found" error after restarting its service. The security monitoring module will fail to appear on the launchpad until the page is manually reloaded. + +#### Workaround + +Reload the page in the browser to resolve this issue. + +--- + +## 1.5.0 +June 12, 2023 + +### {{% icon-resolved %}} Using empty values as filters returns inaccurate results {#42941} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 42941 | Fixed in Security Monitoring -1.6.0 | + +{{}} +#### Description +Using an empty string as a key or value results in an empty dataset. 
+ +--- + +## 1.0.0 +November 17, 2022 + +### {{% icon-resolved %}} The API Connectivity Manager module won't load if the Security Monitoring module is enabled {#44433} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44433 | Fixed in Instance Manager 2.8.0 | + +{{}} +#### Description +If you have Instance Manager 2.7 or earlier installed and attempt to enable both the API Connectivity Manager and Security Monitoring modules on the same NGINX Instance Manager management plane, the API Connectivity Manager module will not load because of incompatibility issues with the Security Monitoring module. + +#### Workaround + +Before enabling the API Connectivity Manager and Security Monitoring modules, ensure that your Instance Manager is upgraded to version 2.8 or later. Be sure to read the release notes for each module carefully, as they may contain important information about version dependencies. + +To see which version of Instance Manager you have installed, run the following command: + +- CentOS, RHEL, RPM-based: + + ```bash + yum info nms-instance-manager + ``` + +- Debian, Ubuntu, Deb-based: + + ```bash + dpkg -s nms-instance-manager + ``` + +--- + +### {{% icon-bug %}} Filtering data by Instance Group in the Security Monitoring module does not show any results. {#38790} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 38790 | Open | + +{{}} +#### Description +The Security Monitoring plugin on NGINX Agent does not automatically pick up changes made to agent-dynamic.conf, causing the Instance Group association to be missing in the Security Violations generated. 
+ +#### Workaround + +Restart the NGINX Agent, and the subsequent Violations should be associated with the Instance Group: + +`systemctl restart nginx-agent` + +--- + +### {{% icon-resolved %}} The field retrieving URIs is incorrectly listed as URL {#38377} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 38377 | Fixed in Security Monitoring -1.2.0 | + +{{}} +#### Description +The field with URI data was mapped to the heading URL. The name of the field has been corrected. diff --git a/content/nim/monitoring/security-monitoring/releases/release-notes.md b/content/nim/monitoring/security-monitoring/releases/release-notes.md new file mode 100644 index 000000000..5be52a3f5 --- /dev/null +++ b/content/nim/monitoring/security-monitoring/releases/release-notes.md @@ -0,0 +1,321 @@ +--- +description: These release notes list and describe the new features, enhancements, + and resolved issues in the F5 NGINX Security Monitoring module. +docs: DOCS-1078 +doctypes: +- reference +tags: +- docs +title: Release Notes +toc: true +weight: 100 +--- + +{{}} + +--- + +## 1.7.1 + +October 23, 2023 + +### Upgrade Paths {#1-7-1-upgrade-paths} + +Security Monitoring supports upgrades from these previous versions: + +- 1.4.0 - 1.7.0 + +If your Security Monitoring version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +### What's New{#1-7-1-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Stability and performance improvements** + + This release includes stability and performance improvements. + + +### Known Issues{#1-7-1-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/monitoring/security-monitoring/releases/known-issues.md" >}}) topic. 
+ +--- + +## 1.7.0 + +October 18, 2023 + +### Upgrade Paths {#1-7-0-upgrade-paths} + +Security Monitoring supports upgrades from these previous versions: + +- 1.4.0 - 1.6.0 + +If your Security Monitoring version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +### Changes in Default Behavior{#1-7-0-changes-in-behavior} +This release has the following changes in default behavior: + +- {{% icon-feature %}} **Security Monitoring backend service** + + The backend for Security Monitoring is now served by the `nms-sm` process instead of `nms-core`. The `nms-sm` process must be started after installation of the `nms-sm` package. + + +### Known Issues{#1-7-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/monitoring/security-monitoring/releases/known-issues.md" >}}) topic. + +--- + +## 1.6.0 + +July 20, 2023 + +### Upgrade Paths {#1-6-0-upgrade-paths} + +Security Monitoring supports upgrades from these previous versions: + +- 1.3.0 - 1.5.0 + +If your Security Monitoring version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +### Resolved Issues{#1-6-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Using empty values as filters returns inaccurate results [(42941)]({{< relref "/nim/monitoring/security-monitoring/releases/known-issues.md#42941" >}}) + +### Known Issues{#1-6-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/monitoring/security-monitoring/releases/known-issues.md" >}}) topic. 
+ +--- + +## 1.5.0 + +June 12, 2023 + +### Upgrade Paths {#1-5-0-upgrade-paths} + +Security Monitoring supports upgrades from these previous versions: + +- 1.2.0 - 1.4.0 + +If your Security Monitoring version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +### What's New{#1-5-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Improved security monitoring with violation and signature details** + + This release adds violation and signature details to Security Monitoring. This information helps you identify false positives and gain a more comprehensive understanding of violations, allowing you to fine-tune your security policies and optimize your threat detection. + + +### Known Issues{#1-5-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/monitoring/security-monitoring/releases/known-issues.md" >}}) topic. + +--- + +## 1.4.0 + +April 26, 2023 + +### Upgrade Paths {#1-4-0-upgrade-paths} + +Security Monitoring supports upgrades from these previous versions: + +- 1.1.0 - 1.3.0 + +If your Security Monitoring version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +### What's New{#1-4-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **View violation context for requests in Event logs** + + You can now view the request entity and its associated details that triggered a WAF violation from the event logs. + + +### Changes in Default Behavior{#1-4-0-changes-in-behavior} +This release has the following changes in default behavior: + +- {{% icon-feature %}} **Update to the Signature context pie chart** + + The Signature context pie chart now shows information related to signature-based violations in requests and URIs, in addition to the already available header, parameter, and cookie information. 
+ + +### Known Issues{#1-4-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/monitoring/security-monitoring/releases/known-issues.md" >}}) topic. + +--- + +## 1.3.0 + +March 21, 2023 + +### Upgrade Paths {#1-3-0-upgrade-paths} + +Security Monitoring supports upgrades from these previous versions: + +- 1.0.0 - 1.2.0 + +If your Security Monitoring version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +### What's New{#1-3-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Top Signatures section added to the Main tab** + + The "Top Signatures" section is now available in the "Main" tab of the Security Monitoring module dashboard. + + +### Security Updates{#1-3-0-security-updates} + +{{< important >}} +For the protection of our customers, NGINX doesn’t disclose security issues until an investigation has occurred and a fix is available. +{{< /important >}} + +This release includes the following security updates: + +- {{% icon-resolved %}} **Instance Manager vulnerability CVE-2023-1550** + + NGINX Agent inserts sensitive information into a log file ([CVE-2023-1550](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-1550)). An authenticated attacker with local access to read NGINX Agent log files may gain access to private keys. This issue is exposed only when the non-default trace-level logging is enabled. + + NGINX Agent is included with NGINX Instance Manager, and used in conjunction with API Connectivity Manager and the Security Monitoring module. + + This issue has been classified as [CWE-532: Insertion of Sensitive Information into Log File](https://cwe.mitre.org/data/definitions/532.html). + +#### Mitigation + +- Avoid configuring trace-level logging in the NGINX Agent configuration file. 
For more information, refer to the [Configuring the NGINX Agent]({{< relref "/nms/nginx-agent/install-nginx-agent.md#configuring-the-nginx-agent" >}}) section of NGINX Instance Manager documentation.
+ + [Update the Geolocation Database]({{< relref "/nim/monitoring/security-monitoring/configure/update-geo-db" >}}) to get the most accurate mapping of IP address to Geolocation. + + +### Resolved Issues{#1-2-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} The field retrieving URIs is incorrectly listed as URL [(38377)]({{< relref "/nim/monitoring/security-monitoring/releases/known-issues.md#38377" >}}) + +### Known Issues{#1-2-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/monitoring/security-monitoring/releases/known-issues.md" >}}) topic. + +--- + +## 1.1.0 + +December 20, 2022 + +### Upgrade Paths {#1-1-0-upgrade-paths} + +Security Monitoring supports upgrades from these previous versions: + +- 1.0.0 + +If your Security Monitoring version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +### Changes in Default Behavior{#1-1-0-changes-in-behavior} +This release has the following changes in default behavior: + +- {{% icon-feature %}} **Removal of Total Requests count** + + The Total Requests count was removed from the Security Monitoring dashboards, to avoid customer confusion, as the value didn't convey different configuration scenarios for NGINX App Protect on NGINX instances. + +- {{% icon-feature %}} **Removal of WAF PASSED requests count** + + The count of WAF `PASSED` requests was removed from the Security Monitoring dashboards to avoid customer confusion, as it counted only requests with violations and not all requests filtered by NGINX App Protect WAF. + + +### Known Issues{#1-1-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/monitoring/security-monitoring/releases/known-issues.md" >}}) topic. 
+ +--- + +## 1.0.0 + +November 17, 2022 + + +### What's New{#1-0-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Introducing the NGINX Security Monitoring module** + + Use the NGINX Security Monitoring module to monitor the NGINX App Protect WAF protection of your apps and APIs. View protection insights for analyzing possible threats and tuning policies. + + The Security Monitoring module includes the following: + + - Informative dashboards that provide valuable protection insights + - In-depth security log details to help with analyzing possible threats and making policy decisions + + Refer to the [Installation Guide]({{< relref "/nim/deploy/_index.md" >}}) to get started. + + +### Known Issues{#1-0-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/monitoring/security-monitoring/releases/known-issues.md" >}}) topic. + diff --git a/content/nim/monitoring/security-monitoring/troubleshooting.md b/content/nim/monitoring/security-monitoring/troubleshooting.md new file mode 100644 index 000000000..75230b7b2 --- /dev/null +++ b/content/nim/monitoring/security-monitoring/troubleshooting.md @@ -0,0 +1,49 @@ +--- +description: This topic describes possible issues users might encounter when using + the Security Monitoring module. When possible, suggested workarounds are provided. +docs: DOCS-1226 +doctypes: +- reference +tags: +- docs +title: Troubleshooting +toc: true +weight: 1000 +--- + +## Security Event log backup with Security Monitoring + +### Description + +If a Security Violation event is not received by the Security Monitoring module, the data representing the attack is lost. + +### Resolution + +F5 NGINX App Protect supports logging to multiple destinations, enabling the user to send a log to NGINX agent and a copy to be stored as a backup. In the event of a failure to receive Security Events in Security Monitoring, the backup log can be checked to verify attack details. 
Change the settings below to enable backup logging: + +1. Instance with Security Monitoring only + +```nginx +app_protect_policy_file "/etc/app_protect/conf/NginxDefaultPolicy.json"; +app_protect_security_log_enable on; +app_protect_security_log "/etc/app_protect/conf/log_sm.json" syslog:server=127.0.0.1:514; +app_protect_security_log "/etc/app_protect/conf/log_sm.json" ; +# Example: app_protect_security_log "/etc/app_protect/conf/log_sm.json" /var/log/app_protect/security.log; +``` + +1. Instance with Security Monitoring and NGINX Instance Manager + +```nginx +app_protect_policy_file "/etc/nms/NginxDefaultPolicy.tgz"; +app_protect_security_log_enable on; +app_protect_security_log "/etc/nms/secops_dashboard.tgz" syslog:server=127.0.0.1:514; +app_protect_security_log "/etc/nms/secops_dashboard.tgz" ; +# Example: app_protect_security_log "/etc/nms/secops_dashboard.tgz" /var/log/app_protect/security.log; +``` + +--- + +## How to Get Support + +{{< include "support/how-to-get-support.md" >}} + diff --git a/content/nim/monitoring/view-events-metrics.md b/content/nim/monitoring/view-events-metrics.md new file mode 100644 index 000000000..8d6e9076b --- /dev/null +++ b/content/nim/monitoring/view-events-metrics.md @@ -0,0 +1,280 @@ +--- +description: Learn how to view events and metrics in F5 NGINX Instance Manager. +docs: DOCS-847 +doctypes: task +title: View events and metrics +toc: true +weight: 300 +--- + +## Overview + +F5 NGINX Instance Manager provides events and metrics data for your instances. You can access this information in the user interface, or by using the REST API. + +{{}}This topic provides examples for using the REST API to view, sort, and filter Events data that can be applied across the NGINX Instance Manager REST API.{{}} + +## View Events in the User Interface + +To view events in the NGINX Instance Manager user interface, take the following steps: + +1. In a web browser, go to the FQDN for your NGINX Instance Manager host and log in. +2. 
In the **Platform** section, select **Events**. The **Events** overview page lists the events from the last six hours, with the most recent event listed first. +3. You can use the filters to filter events by level and time range, and sort events by selecting the column heading. +4. Select an event from the list to view the details. + +## Access Events data by using the REST API + +You can use the Events API to view NGINX Instance Manager events data. You can use basic authentication or JWT authentication to access the NGINX Instance Manager REST API, as described in the [NGINX Instance Manager API Overview]({{< relref "/nim/fundamentals/api-overview#authentication" >}}). + +The examples in this guide demonstrate using a "bearer" token for authentication. The token is sent using the "Authorization" request header field and "Bearer" schema. + +### Query the Events API for all Events + +To query the Events API, send a GET request similar to the following example to the Events endpoint: + +```shell +curl -X GET --url "https:///api/platform/v1/analytics/events" -H "Authorization: Bearer " +``` + +
    +Example Response + +```json +{ + "Metadata": { + "pagination": { + "links": { + "next": {}, + "prev": {} + }, + "pageToken": "1639453182" + } + }, + "items": [{ + "category": "agent status", + "dimensions": { + "alias": "alias", + "hostname": "hostname", + "instance": "instance", + "nginx_id": "nginx_id", + "system_id": "system_id", + }, + "id": "uuid", + "level": "INFO", + "message": "successfully applied config on ", + "status": "Config Apply Success", + "timestamp": "2021-12-14T01:03:11Z" + }, { + "category": "agent status", + "dimensions": { + "alias": "alias", + "hostname": "hostname", + "instance": "instance", + "nginx_id": "nginx_id", + "system_id": "system_id", + }, + "error": "Config apply failed (write): Error running nginx -t exit status 1", + "id": "uuid", + "level": "INFO", + "message": "failed to apply nginx config on ", + "status": "Config Apply Failure", + "timestamp": "2021-12-14T00:57:48Z" + },{ + "category": "agent status", + "dimensions": { + "alias": "alias", + "hostname": "hostname", + "instance": "instance", + "nginx_id": "nginx_id", + "system_id": "system_id" + }, + "id": "uuid", + "level": "INFO", + "message": "nginx-agent v2.1.6 stopped on ", + "status": "Agent Stop", + "timestamp": "2021-12-13T20:08:49Z" + }, { + "category": "agent status", + "dimensions": { + "alias": "alias", + "hostname": "hostname", + "instance": "instance", + "nginx_id": "nginx_id", + "system_id": "system_id", + }, + "id": "uuid", + "level": "INFO", + "message": "nginx-agent v2.1.6 started on ", + "status": "Agent Start", + "timestamp": "2021-12-13T03:20:00Z" + }] +} +``` + +
    + +### Filter Events with Query Parameters + +The list of events can be filtered by passing different query parameters to the API request. The type of filtering depends on the chosen query parameters. This section introduces the list of available query parameters. + +Note that query parameters are only intended for filtering an events collection, not for querying a single event resource. + +#### Time interval + +Events can be queried with an exclusive time interval by passing either a `startTime` or both a `startTime` and an `endTime`. + +##### Start Time + +Passing a `startTime` query parameter to an Events API request will return only the events that occurred after the provided timestamp: + +```shell +curl -X GET --url "https:///api/platform/v1/analytics/events?startTime=2022-03-19T08:00:00.000000000Z" -H "Authorization: Bearer " +``` + +The `startTime` parameter can use the keyword `now` to signify the timestamp at the time of the request. + +Timestamps relative to `now` can be passed by subtracting a period of time from the current time, for example `now-3h` or `now-30m` + +For example: + +```shell +curl -X GET --url "https:///api/platform/v1/analytics/events?startTime=now-3h" -H "Authorization: Bearer " +``` + +Alternatively, the UUID of an event can be passed as a `startTime`. In this case, the events that occurred after the given event will be returned: + +```shell +curl -X GET --url "https:///api/platform/v1/analytics/events?startTime=c77b71b5-3afa-497a-8e1c-fdc11d676796" -H "Authorization: Bearer " +``` + +##### End Time + +The `endTime` query parameter cannot be passed without a `startTime`. Together they form an exclusive time interval, where the `startTime` is inclusive and `endTime` is non-inclusive. It can be formatted in the same three ways as `startTime`. 
+ +```shell +curl -X GET --url "https:///api/platform/v1/analytics/events?startTime=2022-03-19T08:00:00Z&endTime=2022-03-19T12:00:00Z" -H "Authorization: Bearer " +``` + +### Filtering + +The `filter` parameter enables filtering events based on predicates. Predicates are in the form: + +`` + +Where a `` is one of the event's dimensions +`` is one of `=`, `!=`, `>=`, `<=`, `<`, `>`, `in`, `not` +`` and `` are both case sensitive. + +Predicates can be combined into logical expressions using `OR`, `AND`, `(` and `)`. Wildcards (`*`) are supported for matching values. + +```shell +curl -X GET --url "https:///api/platform/v1/analytics/events?filter=category IN ('agent','nms') AND level='debug' AND count > 100" -H "Authorization: Bearer " +``` + +### Sorting + +Events can be sorted based on any of their dimensions with the `orderBy` query parameter. + +`orderBy` dimensions are separated by commas and can optionally given an order. + +```shell +curl -X GET --url "https:///api/platform/v1/analytics/events?orderBy=timestamp DESC,id" -H "Authorization: Bearer " +``` + +The order of the dimensions can be either ascending (ASC) or descending (DESC). By default, that is when the order is omitted, dimensions are sorted in ascending order. + +In the above example, events are sorted in descending timestamp order first, and second in ascending ID order (in cases where timestamps are equal). + +### Pagination + +There are several query parameters related to pagination in the API. By default, pagination is enabled with one hundred events returned per page. + +#### Page + +The `page` query parameter returns the events for the given page number. By default the first page is returned. + +```shell +curl -X GET --url "https:///api/platform/v1/analytics/events?page=3" -H "Authorization: Bearer " +``` + +#### Page Size + +`pageSize` determines how many events are returned per page, up to a maximum of 100. Setting `pageSize` to zero disables pagination. 
+ +```shell +curl -X GET --url "https:///api/platform/v1/analytics/events?pageSize=3" -H "Authorization: Bearer " +``` + +#### Page Token + +`pageToken` is a transactional token that ensures consistency of queries across requests. Responses to queries made with the same `pageToken` will always be the same. The response is a snapshot of the database contents at the time of the original request when the `pageToken` was first used. + +If `pageToken` is omitted, a token is automatically generated and returned in the response's metadata. Subsequent requests can then use that token to ensure consistency. + +```shell +curl -X GET --url "https:///api/platform/v1/analytics/events?pageToken=1573653786" -H "Authorization: Bearer " +``` + +### Aggregations + +#### Count + +Passing the `includeTotal` query parameter with a value of `true` will return the total number of events of the response. The count of events will be in the response's metadata. + +```shell +curl -X GET --url "https:///api/platform/v1/analytics/events?includeTotal=true" -H "Authorization: Bearer " +``` + +### Query a Single Event + +Querying for a unique event requires only the event's UUID. + +```shell +curl -X GET --url "https:///api/platform/v1/analytics/events/7cb91de6-49ae-4ddc-a8b3-3255e00b9346" -H "Authorization: Bearer " +``` + +
    +Example response + +```json +{ + "category": "agent status", + "dimensions": { + "alias": "devenv-agent", + "hostname": "devenv-agent", + "instance": "3d54a8fe-7c90-374f-9cad-fa2b8fccb0cd", + "nginx_id": "3d54a8fe-7c90-374f-9cad-fa2b8fccb0cd", + "system_id": "3d54a8fe-7c90-374f-9cad-fa2b8fccb0cd" + }, + "id": "7cb91de6-49ae-4ddc-a8b3-3255e00b9346", + "level": "INFO", + "message": "nginx-agent v2.11.0 started on devenv-agent", + "status": "Agent Start", + "timestamp": "2022-03-21T14:33:37Z" +} +``` + +
    +--- + +## View Metrics in the User Interface + +{{< shortversions "2.3.0" "latest" "nimvers" >}} + +The **Metrics Summary** page includes a highlights section of the most important metrics reported in the last minute, plus a summary of the key system, network, HTTP request, and connection metrics at a glance. + +To view the metrics summary for an NGINX instance, take the following steps: + +1. In a web browser, go to the FQDN for your NGINX Instance Manager host and log in. +2. Under **Modules**, select the **Instance Manager**. +3. Select an instance on the **Instances** detail page. +4. Select the **Metrics Summary** tab. +5. To view detailed metrics as graphs, select the **Metrics** tab. + +{{}} +Select a time range to change the period for the metrics display. The metrics data refreshes every 30 seconds. +{{}} + +For NGINX OSS instances, you can view basic system metrics and metrics for the NGINX data plane. These provide a high-level overview of your system. + +Additional tabs for NGINX metrics are available if the selected instance is an NGINX Plus instance. These metrics provide a more in-depth overview of your system. 
diff --git a/content/nim/nginx-app-protect/_index.md b/content/nim/nginx-app-protect/_index.md new file mode 100644 index 000000000..6f85ed7b2 --- /dev/null +++ b/content/nim/nginx-app-protect/_index.md @@ -0,0 +1,5 @@ +--- +title: NGINX App Protect WAF +weight: 90 +url: /nginx-instance-manager/nginx-app-protect/ +--- \ No newline at end of file diff --git a/content/nim/nginx-app-protect/manage-waf-security-policies.md b/content/nim/nginx-app-protect/manage-waf-security-policies.md new file mode 100644 index 000000000..915208c3c --- /dev/null +++ b/content/nim/nginx-app-protect/manage-waf-security-policies.md @@ -0,0 +1,1077 @@ +--- +title: Manage WAF Security Policies and Security Log Profiles +description: Learn how to use F5 NGINX Management Suite Instance Manager to manage NGINX + App Protect WAF security policies and security log profiles. +weight: 200 +toc: true +type: how-to +product: NIM +docs: DOCS-1105 +--- + +## Overview + +F5 NGINX Management Suite Instance Manager provides the ability to manage the configuration of NGINX App Protect WAF instances either by the user interface or the REST API. This includes editing, updating, and deploying security policies, log profiles, attack signatures, and threat campaigns to individual instances and/or instance groups. + +In Instance Manager v2.14.0 and later, you can compile a security policy, attack signatures, and threat campaigns into a security policy bundle. A security policy bundle consists of the security policy, the attack signatures, and threat campaigns for a particular version of NGINX App Protect WAF, and additional supporting files that make it possible for NGINX App Protect WAF to use the bundle. Because the security policy bundle is pre-compiled, the configuration gets applied faster than when you individually reference the security policy, attack signature, and threat campaign files. 
+ +{{}} +The following capabilities are only available via the Instance Manager REST API: + +- Update security policies +- Create, read, and update security policy bundles +- Create, read, update, and delete Security Log Profiles +- Publish security policies, security log profiles, attack signatures, and/or threat campaigns to instances and instance groups +{{}} + +--- + +## Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- [Set Up App Protect WAF Configuration Management]({{< relref "setup-waf-config-management" >}}) +- Verify that your user account has the [necessary permissions]({{< relref "/nim/admin-guide/rbac/overview-rbac.md" >}}) to access the Instance Manager REST API: + + - **Module**: Instance Manager + - **Feature**: Instance Management + - **Access**: `READ` + - **Feature**: Security Policies + - **Access**: `READ`, `CREATE`, `UPDATE`, `DELETE` + +The following are required to use support policy bundles: + +- You must have `UPDATE` permissions for the security policies specified in the request. +- The correct `nms-nap-compiler` packages for the NGINX App Protect WAF version you're using are [installed on Instance Manager]({{< relref "/nim/nginx-app-protect/setup-waf-config-management.md#install-the-waf-compiler" >}}). +- The attack signatures and threat campaigns that you want to use are [installed on Instance Manager]({{< relref "/nim/nginx-app-protect/setup-waf-config-management.md#set-up-attack-signatures-and-threat-campaigns" >}}). + +### How to Access the Web Interface + +To access the web interface, go to the FQDN for your NGINX Instance Manager host in a web browser and log in. Once you're logged in, select "Instance Manager" from the Launchpad menu. + +### How to Access the REST API + +{{< include "nim/how-to-access-nim-api.md" >}} + +--- + +## Create a Security Policy {#create-security-policy} + +{{}} + +{{%tab name="web interface"%}} + +
    + +To create a security policy using the Instance Manager web interface: + +1. In a web browser, go to the FQDN for your NGINX Management Suite host and log in. Then, from the Launchpad menu, select **Instance Manager**. +2. On the left menu, select **App Protect**. +3. On the *Security Policies* page, select **Create**. +4. On the *Create Policy* page, fill out the necessary fields: + + - **Name**: Provide a name for the policy. + - **Description**: (Optional) Add a short description for the policy. + - **Enter Policy**: Type or paste the policy in JSON format into the form provided. The editor will validate the JSON for accuracy. + + For more information about creating custom policies, refer to the [NGINX App Protect WAF Declarative Policy](https://docs.nginx.com/nginx-app-protect/declarative-policy/policy/) guide and the [Policy Authoring and Tuning](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#policy-authoring-and-tuning) section of the config guide. + +5. Select **Save**. + +{{%/tab%}} + +{{%tab name="API"%}} + +To upload a new security policy, send an HTTP `POST` request to the Security Policies API endpoint. + +{{}}Before sending a security policy to Instance Manager, you need to encode it using `base64`. Submitting a policy in its original JSON format will result in an error.{{}} + +
    + + +{{}} + +| Method | Endpoint | +|--------|--------------------------------------| +| POST | `/api/platform/v1/security/policies` | + +{{}} + + +For example: + +```shell +curl -X POST https://{{NMS_FQDN}}/api/platform/v1/security/policies \ + -H "Authorization: Bearer " \ + -d @ignore-xss-example.json +``` + +
    +JSON Request + +```json +{ + "metadata": { + "name": "ignore-cross-site-scripting", + "displayName": "Ignore cross-site scripting", + "description": "Ignore cross-site scripting is a security policy that intentionally ignores cross site scripting." + }, + "content": "ewoJInBvbGljeSI6IHsKCQkibmFtZSI6ICJzaW1wbGUtYmxvY2tpbmctcG9saWN5IiwKCQkic2lnbmF0dXJlcyI6IFsKCQkJewoJCQkJInNpZ25hdHVyZUlkIjogMjAwMDAxODM0LAoJCQkJImVuYWJsZWQiOiBmYWxzZQoJCQl9CgkJXSwKCQkidGVtcGxhdGUiOiB7CgkJCSJuYW1lIjogIlBPTElDWV9URU1QTEFURV9OR0lOWF9CQVNFIgoJCX0sCgkJImFwcGxpY2F0aW9uTGFuZ3VhZ2UiOiAidXRmLTgiLAoJCSJlbmZvcmNlbWVudE1vZGUiOiAiYmxvY2tpbmciCgl9Cn0=" +} +``` + +
    + +
    +JSON Response + +```json +{ + "metadata": { + "created": "2022-04-10T23:19:58.502Z", + "description": "string", + "displayName": "Ignore cross-site scripting", + "modified": "2022-04-12T23:19:58.502Z", + "name": "ignore-cross-site-scripting", + "revisionTimestamp": "2022-04-12T23:19:58.502Z", + "uid": "21daa130-4ba4-442b-bc4e-ab294af123e5" + }, + "selfLink": { + "rel": "/api/platform/v1/services/environments/prod" + } +} +``` + +{{%/tab%}} + +{{}} + +--- + +## Update a Security Policy + +To update a security policy, send an HTTP `POST` request to the Security Policies API endpoint, `/api/platform/v1/security/policies`. + +You can use the optional `isNewRevision` parameter to indicate whether the updated policy is a new version of an existing policy. + + +{{}} + +| Method | Endpoint | +|--------|---------------------------------------------------------| +| POST | `/api/platform/v1/security/policies?isNewRevision=true` | +| PUT | `/api/platform/v1/security/policies/{system_id_string}` | + +{{}} + + +For example: + +```shell +curl -X POST https://{{NMS_FQDN}}/api/platform/v1/security/policies?isNewRevision=true \ + -H "Authorization: Bearer " \ + -d @update-xss-policy.json +``` + +You can update a specific policy by sending an HTTP `PUT` request to the Security Policies API endpoint that includes the policy's unique identifier (UID). + +To find the UID, send an HTTP `GET` request to the Security Policies API endpoint. This returns a list of all Security Policies that contains the unique identifier for each policy. + +Include the UID for the security policy in your `PUT` request to update the policy. Once the policy update is accepted, the WAF compiler will create a new, updated bundle. 
+
+For example:
+
+```shell
+curl -X PUT https://{{NMS_FQDN}}/api/platform/v1/security/policies/23139e0a-4ac8-49f9-b7a0-0577b42c70c7 \
+    -H "Authorization: Bearer " \
+    -H "Content-Type: application/json" -d @update-xss-policy.json
+```
+
+After you have pushed an updated security policy, you can [publish it](#publish-policy) to selected instances or instance groups.
+
+---
+
+## Delete a Security Policy
+
+{{}}
+
+{{%tab name="web interface"%}}
+
    + +To delete a security policy using the Instance Manager web interface: + +1. In a web browser, go to the FQDN for your NGINX Management Suite host and log in. Then, from the Launchpad menu, select **Instance Manager**. +2. On the left menu, select **App Protect**. +3. On the *Security Policies* page, select the **Actions** menu (represented by an ellipsis, **...**) for the policy you want to delete. Select **Delete** to remove the policy. + +{{%/tab%}} + +{{%tab name="API"%}} + +To delete a security policy, send an HTTP `DELETE` request to the Security Policies API endpoint that includes the unique identifier for the policy that you want to delete. + + +{{}} + +| Method | Endpoint | +|--------|------------------------------------------------------------| +| DELETE | `/api/platform/v1/security/policies/{security-policy-uid}` | + +{{}} + + +For example: + +```shell +curl -X DELETE https://{{NMS_FQDN}}/api/platform/v1/security/policies/23139e0a-4ac8-49f9-b7a0-0577b42c70c7 \ + -H "Authorization: Bearer " +``` + +{{%/tab%}} + +{{
    }} + +{{%comment%}}TO DO: Add sections for managing attack signatures and threat campaigns{{%/comment%}} + +--- + +## Create Security Policy Bundles {#create-security-policy-bundles} + +To create security policy bundles, send an HTTP `POST` request to the Security Policies Bundles API endpoint. The specified security policies you'd like to compile into security policy bundles must already exist in Instance Manager. + +### Required Fields + +- `appProtectWAFVersion`: The version of NGINX App Protect WAF being used. +- `policyName`: The name of security policy to include in the bundle. This must reference an existing security policy; refer to the [Create a Security Policy](#create-security-policy) section above for instructions. + +### Notes + +- If you do not specify a value for the `attackSignatureVersionDateTime` and/or `threatCampaignVersionDateTime` fields, the latest version of each will be used by default. You can also explicitly state that you want to use the most recent version by specifying the keyword `latest` as the value. +- If the `policyUID` field is not defined, the latest version of the specified security policy will be used. This field **does not allow** use of the keyword `latest`. + +{{}} + +| Method | Endpoint | +|--------|--------------------------------------| +| POST | `/api/platform/v1/security/policies/bundles` | + +{{}} + +For example: + +```shell +curl -X POST https://{{NMS_FQDN}}/api/platform/v1/security/policies/bundles \ + -H "Authorization: Bearer " \ + -d @security-policy-bundles.json +``` + +
    +JSON Request + +```json +{ + "bundles": [{ + "appProtectWAFVersion": "4.457.0", + "policyName": "default-enforcement", + "policyUID": "29d86fe8-612a-5c69-895a-04fc5b9849a6", + "attackSignatureVersionDateTime": "2023.06.20", + "threatCampaignVersionDateTime": "2023.07.18" + }, + { + "appProtectWAFVersion": "4.279.0", + "policyName": "default-enforcement", + "attackSignatureVersionDateTime": "latest", + "threatCampaignVersionDateTime": "latest" + }, + { + "appProtectWAFVersion": "4.457.0", + "policyName": "ignore-xss" + } + ] +} +``` + +
    + +
    +JSON Response + +```json +{ + "items": [{ + "metadata": { + "created": "2023-10-04T23:19:58.502Z", + "modified": "2023-10-04T23:19:58.502Z", + "appProtectWAFVersion": "4.457.0", + "policyName": "default-enforcement", + "policyUID": "29d86fe8-612a-5c69-895a-04fc5b9849a6", + "attackSignatureVersionDateTime": "2023.06.20", + "threatCampaignVersionDateTime": "2023.07.18", + "uid": "dceb8254-9a90-4e77-87ac-73070f821412" + }, + "content": "", + "compilationStatus": { + "status": "compiling", + "message": "" + } + }, + { + "metadata": { + "created": "2023-10-04T23:19:58.502Z", + "modified": "2023-10-04T23:19:58.502Z", + "appProtectWAFVersion": "4.279.0", + "policyName": "defautl-enforcement", + "policyUID": "04fc5b9849a6-612a-5c69-895a-29d86fe8", + "attackSignatureVersionDateTime": "2023.08.10", + "threatCampaignVersionDateTime": "2023.08.09", + "uid": "trs35lv2-9a90-4e77-87ac-ythn4967" + }, + "content": "", + "compilationStatus": { + "status": "compiling", + "message": "" + } + }, + { + "metadata": { + "created": "2023-10-04T23:19:58.502Z", + "modified": "2023-10-04T23:19:58.502Z", + "appProtectWAFVersion": "4.457.0", + "policyName": "ignore-xss", + "policyUID": "849a604fc5b9-612a-5c69-895a-86f29de8", + "attackSignatureVersionDateTime": "2023.08.10", + "threatCampaignVersionDateTime": "2023.08.09", + "uid": "nbu844lz-9a90-4e77-87ac-zze8861d" + }, + "content": "", + "compilationStatus": { + "status": "compiling", + "message": "" + } + } + ] +} +``` + + +--- + +## List Security Policy Bundles {#list-security-policy-bundles} + +To list security policy bundles, send an HTTP `GET` request to the Security Policies Bundles API endpoint. 
+ +{{}}The list will only contain the security policy bundles that you have "READ" permissions for in Instance Manager.{{}} + +You can filter the results by using the following query parameters: + +- `includeBundleContent`: Boolean indicating whether to include the security policy bundle content for each bundle when getting a list of bundles or not. If not provided, defaults to `false`. Please note that the content returned is `base64 encoded`. +- `policyName`: String used to filter the list of security policy bundles; only security policy bundles that have the specified security policy name will be returned. If not provided, it will not filter based on `policyName`. +- `policyUID`: String used to filter the list of security policy bundles; only security policy bundles that have the specified security policy UID will be returned. If not provided, it will not filter based on `policyUID`. +- `startTime`: The security policy bundle's "modified time" has to be equal to or greater than this time value. If no value is supplied, it defaults to 24 hours from the current time. `startTime` has to be less than `endTime`. +- `endTime`: Indicates the time that the security policy bundles modified time has to be less than. If no value is supplied, it defaults to current time. `endTime` has to be greater than `startTime`. + +
    + + +{{}} + +| Method | Endpoint | +|--------|--------------------------------------| +| GET | `/api/platform/v1/security/policies/bundles` | + +{{}} + + +For example: + +```shell +curl -X GET https://{{NMS_FQDN}}/api/platform/v1/security/policies/bundles \ + -H "Authorization: Bearer " +``` + +
    +JSON Response + +```json +{ + "items": [{ + "metadata": { + "created": "2023-10-04T23:19:58.502Z", + "modified": "2023-10-04T23:19:58.502Z", + "appProtectWAFVersion": "4.457.0", + "policyName": "default-enforcement", + "policyUID": "29d86fe8-612a-5c69-895a-04fc5b9849a6", + "attackSignatureVersionDateTime": "2023.06.20", + "threatCampaignVersionDateTime": "2023.07.18", + "uid": "dceb8254-9a90-4e77-87ac-73070f821412" + }, + "content": "", + "compilationStatus": { + "status": "compiled", + "message": "" + } + }, + { + "metadata": { + "created": "2023-10-04T23:19:58.502Z", + "modified": "2023-10-04T23:19:58.502Z", + "appProtectWAFVersion": "4.279.0", + "policyName": "defautl-enforcement", + "policyUID": "04fc5b9849a6-612a-5c69-895a-29d86fe8", + "attackSignatureVersionDateTime": "2023.08.10", + "threatCampaignVersionDateTime": "2023.08.09", + "uid": "trs35lv2-9a90-4e77-87ac-ythn4967" + }, + "content": "", + "compilationStatus": { + "status": "compiled", + "message": "" + } + }, + { + "metadata": { + "created": "2023-10-04T23:19:58.502Z", + "modified": "2023-10-04T23:19:58.502Z", + "appProtectWAFVersion": "4.457.0", + "policyName": "ignore-xss", + "policyUID": "849a604fc5b9-612a-5c69-895a-86f29de8", + "attackSignatureVersionDateTime": "2023.08.10", + "threatCampaignVersionDateTime": "2023.08.09", + "uid": "nbu844lz-9a90-4e77-87ac-zze8861d" + }, + "content": "", + "compilationStatus": { + "status": "compiling", + "message": "" + } + } + ] +} +``` + +--- + +## Get a Security Policy Bundle {#get-security-policy-bundle} + +To get a specific security policy bundle, send an HTTP `GET` request to the Security Policies Bundles API endpoint that contains the security policy UID and security policy bundle UID in the path. + +{{}}You must have "READ" permission for the security policy bundle to be able to retrieve information about a bundle by using the REST API.{{}} + +
    + + +{{}} + +| Method | Endpoint | +|--------|--------------------------------------| +| GET | `/api/platform/v1/security/policies/{security-policy-uid}/bundles/{security-policy-bundle-uid}` | + +{{}} + + +For example: + +```shell +curl -X GET https://{{NMS_FQDN}}/api/platform/v1/security/policies/29d86fe8-612a-5c69-895a-04fc5b9849a6/bundles/trs35lv2-9a90-4e77-87ac-ythn4967 \ + -H "Authorization: Bearer " +``` + +The JSON response, shown in the example below, includes a `content` field that is base64 encoded. After you retrieve the information from the API, you will need to base64 decode the content field. You can include this in your API call, as shown in the following example cURL request: + +```bash +curl -X GET "https://{NMS_FQDN}/api/platform/v1/security/policies/{security-policy-uid}/bundles/{security-policy-bundle-uid}" -H "Authorization: Bearer xxxxx.yyyyy.zzzzz" | jq -r '.content' | base64 -d > security-policy-bundle.tgz +``` + +
    +JSON Response + +```json +{ + "metadata": { + "created": "2023-10-04T23:19:58.502Z", + "modified": "2023-10-04T23:19:58.502Z", + "appProtectWAFVersion": "4.457.0", + "policyUID": "29d86fe8-612a-5c69-895a-04fc5b9849a6", + "attackSignatureVersionDateTime": "2023.08.10", + "threatCampaignVersionDateTime": "2023.08.09", + "uid": "trs35lv2-9a90-4e77-87ac-ythn4967" + }, + "content": "ZXZlbnRzIHt9Cmh0dHAgeyAgCiAgICBzZXJ2ZXIgeyAgCiAgICAgICAgbGlzdGVuIDgwOyAgCiAgICAgICAgc2VydmVyX25hbWUgXzsKCiAgICAgICAgcmV0dXJuIDIwMCAiSGVsbG8iOyAgCiAgICB9ICAKfQ==", + "compilationStatus": { + "status": "compiled", + "message": "" + } +} +``` + +--- + +## Create a Security Log Profile {#create-security-log-profile} + +Send an HTTP `POST` request to the Security Log Profiles API endpoint to upload a new security log profile. + +{{}}Before sending a security log profile to Instance Manager, you need to encode it using `base64`. Submitting a log profile in its original JSON format will result in an error.{{}} + +
    + + +{{}} + +| Method | Endpoint | +|--------|--------------------------------------| +| POST | `/api/platform/v1/security/logprofiles` | + +{{}} + + +For example: + +```shell +curl -X POST https://{{NMS_FQDN}}/api/platform/v1/security/logprofiles \ + -H "Authorization: Bearer " \ + -d @default-log-example.json +``` + +
    +JSON Request + +```json +{ + "metadata": { + "name": "default-log-example" + }, + "content": "Cgl7CgkJImZpbHRlciI6IHsKCQkJInJlcXVlc3RfdHlwZSI6ICJpbGxlZ2FsIgoJCX0sCgkJImNvbnRlbnQiOiB7CgkJCSJmb3JtYXQiOiAiZGVmYXVsdCIsCgkJCSJtYXhfcmVxdWVzdF9zaXplIjogImFueSIsCgkJCSJtYXhfbWVzc2FnZV9zaXplIjogIjVrIgoJCX0KCX0=" +} +``` + +
    + +
    +JSON Response + +```json +{ + "metadata": { + "created": "2023-07-05T22:09:19.634358096Z", + "externalIdType": "", + "modified": "2023-07-05T22:09:19.634358096Z", + "name": "default-log-example", + "revisionTimestamp": "2023-07-05T22:09:19.634358096Z", + "uid": "54c35ad7-e082-4dc5-bb5d-2640a17d5620" + }, + "selfLink": { + "rel": "/api/platform/v1/security/logprofiles/54c35ad7-e082-4dc5-bb5d-2640a17d5620" + } +} +``` + +--- + +## Update a Security Log Profile + +To update a security log profile, send an HTTP `POST` request to the Security Log Profiles API endpoint, `/api/platform/v1/security/logprofiles`. + +You can use the optional `isNewRevision` parameter to indicate whether the updated log profile is a new version of an existing log profile. + + +{{}} + +| Method | Endpoint | +|--------|---------------------------------------------------------| +| POST | `/api/platform/v1/security/logprofiles?isNewRevision=true` | +| PUT | `/api/platform/v1/security/logprofiles/{security-log-profile-uid}` | + +{{}} + + +For example: + +```shell +curl -X POST https://{{NMS_FQDN}}/api/platform/v1/security/logprofiles?isNewRevision=true \ + -H "Authorization: Bearer " \ + -d @update-default-log.json +``` + +You can update a specific log profile by sending an HTTP `PUT` request to the Security Log Profiles API endpoint that includes the log profile's unique identifier (UID). + +To find the UID, send an HTTP `GET` request to the Security Log Profiles API endpoint. This returns a list of all Security Log Profiles that contains the unique identifier for each log profile. + +Include the UID for the security log profile in your `PUT` request to update the log profile. 
+
+For example:
+
+```shell
+curl -X PUT https://{{NMS_FQDN}}/api/platform/v1/security/logprofiles/23139e0a-4ac8-49f9-b7a0-0577b42c70c7 \
+    -H "Authorization: Bearer " \
+    -H "Content-Type: application/json" -d @update-default-log.json
+```
+
+After you have pushed an updated security log profile, you can [publish it](#publish-policy) to selected instances or instance groups.
+
+---
+
+## Delete a Security Log Profile
+
+To delete a security log profile, send an HTTP `DELETE` request to the Security Log Profiles API endpoint that includes the unique identifier for the log profile that you want to delete.
+
+
+{{}}
+
+| Method | Endpoint |
+|--------|------------------------------------------------------------|
+| DELETE | `/api/platform/v1/security/logprofiles/{security-log-profile-uid}` |
+
+{{}}
+
+
+For example:
+
+```shell
+curl -X DELETE https://{{NMS_FQDN}}/api/platform/v1/security/logprofiles/23139e0a-4ac8-49f9-b7a0-0577b42c70c7 \
+    -H "Authorization: Bearer "
+```
+
+---
+
+## Publish Updates to Instances {#publish-policy}
+
+The Publish API lets you distribute security policies, security log profiles, attack signatures, and/or threat campaigns to instances and instance groups.
+
+{{}}Use this endpoint *after* you've added or updated security policies, security log profiles, attack signatures, and/or threat campaigns.{{}}
+
+
+{{}}
+
+| Method | Endpoint |
+|--------|-------------------------------------|
+| POST | `/api/platform/v1/security/publish` |
+
+{{}}
+
+
+When making a request to the Publish API, make sure to include all the necessary information for your specific use case:
+
+- Instance and/or Instance Group UID(s) to push the bundle to
+- Threat Campaign version and UID
+- Attack Signature version and UID
+- Security Policy UID(s)
+- Security Log Profile UID(s)
+
+For example:
+
+```shell
+curl -X POST https://{{NMS_FQDN}}/api/platform/v1/security/publish -H "Authorization: Bearer "
+```
+
    +JSON Request + +```json +{ + "publications": [ + { + "attackSignatureLibrary": { + "uid": "3fa85f64-5717-4562-b3fc-2c963f66afa6", + "versionDateTime": "2022.10.02" + }, + "instanceGroups": [ + "3fa85f64-5717-4562-b3fc-2c963f66afa6" + ], + "instances": [ + "3fa85f64-5717-4562-b3fc-2c963f66afa6" + ], + "logProfileContent": { + "name": "default-log", + "uid": "ffdbda39-88be-420a-b673-19d4183b7e4c" + }, + "policyContent": { + "name": "default-enforcement", + "uid": "3fa85f64-5717-4562-b3fc-2c963f66afa6" + }, + "threatCampaign": { + "uid": "3fa85f64-5717-4562-b3fc-2c963f66afa6", + "versionDateTime": "2022.10.01" + } + } + ] +} +``` + +
    + +
    +JSON Response + +```json +{ + "deployments": [ + { + "deploymentUID": "ddc781ca-15d6-46c9-86ea-e7bdb91e8dec", + "links": { + "rel": "/api/platform/v1/security/deployments/ddc781ca-15d6-46c9-86ea-e7bdb91e8dec" + }, + "result": "Publish security content request Accepted" + } + ] +} +``` + +
+
+---
+
+## Check Security Policy and Security Log Profile Publication Status
+
+When publishing an NGINX configuration that references a security policy and security log profile, the Instance Manager REST APIs can provide further details about the status of the configuration publications. To access this information, use the Instance Manager API endpoints and method as indicated.
+
+To retrieve the details for the different configuration publication statuses for a particular security policy, send an HTTP `GET` request to the Security Deployments Associations API endpoint, providing the name of the security policy.
+
+| Method | Endpoint |
+|--------|-----------------------------------------------------------------------------|
+| GET | `/api/platform/v1/security/deployments/associations/{security-policy-name}` |
+
+You can locate the configuration publication status in the response within the field `lastDeploymentDetails` for instances and instance groups:
+
+- `lastDeploymentDetails` (for an instance): associations -> instance -> lastDeploymentDetails
+- `lastDeploymentDetails` (for an instance in an instance group): associations -> instanceGroup -> instances -> lastDeploymentDetails
+
+The example below shows a call to the `security deployments associations` endpoint and the corresponding JSON response containing successful deployments.
+
+```shell
+curl -X GET "https://{NGINX-INSTANCE-MANAGER-FQDN}/api/platform/v1/security/deployments/associations/ignore-xss" -H "Authorization: Bearer "
+```
+
    +JSON Response + +```json +{ + "associations": [ + { + "attackSignatureLibrary": { + "uid": "c69460cc-6b59-4813-8d9c-76e4a6c56b4b", + "versionDateTime": "2023.02.16" + }, + "instance": { + "hostName": "ip-172-16-0-99", + "lastDeploymentDetails": { + "createTime": "2023-04-11T21:36:11.519174534Z", + "details": { + "failure": [], + "pending": [], + "success": [ + { + "name": "ip-172-16-0-99" + } + ] + }, + "id": "19cf5ed4-29d6-4139-b5f5-308c0d0ebb13", + "message": "Instance config successfully published to", + "status": "successful", + "updateTime": "2023-04-11T21:36:14.008108979Z" + }, + "systemUid": "0435a5de-41c1-3754-b2e8-9d9fe946bafe", + "uid": "29d86fe8-612a-5c69-895a-04fc5b9849a6" + }, + "instanceGroup": { + "displayName": "inst_group_1", + "instances": [ + { + "hostName": "hostname1", + "systemUid": "49d143c2-f556-4cd7-8658-76fff54fb861", + "uid": "c8e15dcf-c504-4b7f-b52d-def7b8fd2f64", + "lastDeploymentDetails": { + "createTime": "2023-04-11T21:36:11.519174534Z", + "details": { + "failure": [], + "pending": [], + "success": [ + { + "name": "ip-172-16-0-99" + } + ] + }, + "id": "19cf5ed4-29d6-4139-b5f5-308c0d0ebb13", + "message": "Instance config successfully published to", + "status": "successful", + "updateTime": "2023-04-11T21:36:14.008108979Z" + }, + }, + { + "hostName": "hostname2", + "systemUid": "88a99ab0-15bb-4719-9107-daf5007c33f7", + "uid": "ed7e9173-794f-41af-80d9-4ed37d593247", + "lastDeploymentDetails": { + "createTime": "2023-04-11T21:36:11.519174534Z", + "details": { + "failure": [], + "pending": [], + "success": [ + { + "name": "ip-172-16-0-99" + } + ] + }, + "id": "19cf5ed4-29d6-4139-b5f5-308c0d0ebb13", + "message": "Instance config successfully published to", + "status": "successful", + "updateTime": "2023-04-11T21:36:14.008108979Z" + }, + } + ], + "uid": "51f8addc-c0e9-438b-b0b6-3e4f1aa8202d" + }, + "policyUid": "9991f237-d9c7-47b7-98aa-faa836838f38", + "policyVersionDateTime": "2023-04-11T21:18:19.183Z", + "threatCampaign": { + "uid": 
"eab683fe-c2f1-4910-a88c-8bfbc6363164", + "versionDateTime": "2023.02.15" + } + } + ] +} +``` + +
    + +To retrieve the details for the different configuration publication statuses for a particular security log profile, send an HTTP `GET` request to the Security Deployments Associations API endpoint, providing the name of the security log profile. + +| Method | Endpoint | +|--------|-----------------------------------------------------------------------------| +| GET | `/api/platform/v1/security/deployments/logprofiles/associations/{security-log-profile-name}` | + +You can locate the configuration publication status in the response within the field `lastDeploymentDetails` for instances and instance groups: + +- `lastDeploymentDetails` (for an instance): associations -> instance -> lastDeploymentDetails +- `lastDeploymentDetails` (for an instance in an instance group): associations -> instanceGroup -> instances -> lastDeploymentDetails + +The example below shows a call to the `security deployments associations` endpoint and the corresponding JSON response containing successful deployments. + +```shell +curl -X GET "https://{NGINX-INSTANCE-MANAGER-FQDN}/api/platform/v1/security/deployments/logprofiles/associations/default-log" -H "Authorization: Bearer " +``` + +
    +JSON Response + +```json +{ + "associations": [ + { + "instance": { + "hostName": "", + "systemUid": "", + "uid": "" + }, + "instanceGroup": { + "displayName": "ig1", + "instances": [ + { + "hostName": "ip-172-16-0-142", + "systemUid": "1d1f03ff-02de-32c5-8dfd-902658aada4c", + "uid": "18d074e6-3868-51ba-9999-b7466a936815" + } + ], + "lastDeploymentDetails": { + "createTime": "2023-07-05T23:01:06.679136973Z", + "details": { + "failure": [], + "pending": [], + "success": [ + { + "name": "ip-172-16-0-142" + } + ] + }, + "id": "9bfc9db7-877d-4e8e-a43d-9660a6cd11cc", + "message": "Instance Group config successfully published to ig1", + "status": "successful", + "updateTime": "2023-07-05T23:01:06.790802157Z" + }, + "uid": "0df0386e-82f7-4efc-863e-5d7cfbc3f7df" + }, + "logProfileUid": "b680f7c3-6fc0-4c6b-889a-3025580c7fcb", + "logProfileVersionDateTime": "2023-07-05T22:08:47.371Z" + }, + { + "instance": { + "hostName": "ip-172-16-0-5", + "lastDeploymentDetails": { + "createTime": "2023-07-05T21:45:08.698646791Z", + "details": { + "failure": [], + "pending": [], + "success": [ + { + "name": "ip-172-16-0-5" + } + ] + }, + "id": "73cf670a-738a-4a74-b3fb-ac9771e89814", + "message": "Instance config successfully published to", + "status": "successful", + "updateTime": "2023-07-05T21:45:08.698646791Z" + }, + "systemUid": "0afe5ac2-43aa-36c8-bcdc-7f88cdd35ab2", + "uid": "9bb4e2ef-3746-5d79-b526-e545fad27e90" + }, + "instanceGroup": { + "displayName": "", + "instances": [], + "uid": "" + }, + "logProfileUid": "bb3badb2-f8f5-4b95-9428-877fc208e2f1", + "logProfileVersionDateTime": "2023-07-03T21:46:17.006Z" + } + ] +} +``` + +
    + +To retrieve the configuration publication status details for a particular instance, send an HTTP `GET` request to the Instances API endpoint, providing the unique system and instance identifiers. + +| Method | Endpoint | +|--------|-----------------------------------------------------------------| +| GET | `/api/platform/v1/systems/{system-uid}/instances/{instance-id}` | + +You can locate the configuration publication status in the the response within the `lastDeploymentDetails` field, which contains additional fields that provide more context around the status. + +The example below shows a call to the `instances` endpoint and the corresponding JSON response containing a compiler related error message. + +```shell +curl -X GET "https://{NGINX-INSTANCE-MANAGER-FQDN}/api/platform/v1/systems/b9df6377-2c4f-3266-a64a-e064b0371c73/instances/5663cf4e-a0c7-50c8-b93c-16fd11a0f00b" -H "Authorization: Bearer " +``` + +
    +JSON Response + +```json +{ + "build": { + "nginxPlus": true, + "release": "nginx-plus-r28", + "version": "1.23.2" + }, + "configPath": "/etc/nginx/nginx.conf", + "configVersion": { + "instanceGroup": { + "createTime": "0001-01-01T00:00:00Z", + "uid": "", + "versionHash": "" + }, + "versions": [ + { + "createTime": "2023-01-14T10:48:46.319Z", + "uid": "5663cf4e-a0c7-50c8-b93c-16fd11a0f00b", + "versionHash": "922e9d40fa6d4dd3a4b721295b8ecd95f73402644cb8d234f9f4f862b8a56bfc" + } + ] + }, + "displayName": "ip-192-0-2-27", + "links": [ + { + "rel": "/api/platform/v1/systems/b9df6377-2c4f-3266-a64a-e064b0371c73", + "name": "system" + }, + { + "rel": "/api/platform/v1/systems/b9df6377-2c4f-3266-a64a-e064b0371c73/instances/5663cf4e-a0c7-50c8-b93c-16fd11a0f00b", + "name": "self" + }, + { + "rel": "/api/platform/v1/systems/instances/deployments/b31c6ab1-4a46-4c81-a065-204575145e8e", + "name": "deployment" + } + ], + "processPath": "/usr/sbin/nginx", + "registrationTime": "2023-01-14T10:12:31.000Z", + "startTime": "2023-01-14T10:09:43Z", + "status": { + "lastStatusReport": "2023-01-14T11:11:49.323495017Z", + "state": "online" + }, + "uid": "5663cf4e-a0c7-50c8-b93c-16fd11a0f00b", + "version": "1.23.2", + "appProtect": { + "attackSignatureVersion": "Available after publishing Attack Signatures from Instance Manager", + "status": "active", + "threatCampaignVersion": "Available after publishing Threat Campaigns from Instance Manager", + "version": "4.2.0" + }, + "configureArgs": [ + ... 
+ ], + "lastDeploymentDetails": { + "createTime": "2023-01-14T11:10:25.096812852Z", + "details": { + "error": "{\"instance:b9df6377-2c4f-3266-a64a-e064b0371c73\":\"failed building config payload: policy compilation failed for deployment b31c6ab1-4a46-4c81-a065-204575145e8e due to integrations service error: the specified compiler (4.2.0) is missing, please install it and try again.\"}", + "failure": [ + { + "failMessage": "failed building config payload: policy compilation failed for deployment b31c6ab1-4a46-4c81-a065-204575145e8e due to integrations service error: the specified compiler (4.2.0) is missing, please install it and try again.", + "name": "ip-192-0-2-27" + } + ], + "pending": [], + "success": [] + }, + "id": "b31c6ab1-4a46-4c81-a065-204575145e8e", + "message": "Instance config failed to publish to", + "status": "failed", + "updateTime": "2023-01-14T11:10:25.175145693Z" + }, + "loadableModules": [ + ... + ], + "packages": [ + ... + ], + "processId": "10345", + "ssl": { + "built": null, + "runtime": null + } +} +``` + +
    + +When you use the Publish API (`/security/publish`) to [publish a security policy and security log profile](#publish-policy), Instance Manager creates a deployment ID for the request. To view the status of the update, or to check for any errors, use the endpoint and method shown below and reference the deployment ID. + +| Method | Endpoint | +|--------|------------------------------------------------------------------| +| GET | `/api/platform/v1/systems/instances/deployments/{deployment-id}` | + +You can locate the configuration publication status in the the response within the `details` field, which contains additional fields that provide more context around the status. + +The example below shows a call to the `deployments` endpoint and the corresponding JSON response containing a compiler error message. + +```shell +curl -X GET --url "https://{NGINX-INSTANCE-MANAGER-FQDN}/api/platform/v1/systems/instances/deployments/d38a8e5d-2312-4046-a60f-a30a4aea1fbb" \ + -H "Authorization: Bearer " +``` + +
    +JSON Response + +```json +{ + "createTime": "2023-01-14T04:35:47.566082799Z", + "details": { + "error": "{\"instance:8a2092aa-5612-370d-bff0-5d7521e206d6\":\"failed building config payload: policy bundle compilation failed for d38a8e5d-2312-4046-a60f-a30a4aea1fbb, integrations service returned the following error: missing the specified compiler (4.2.0) please install it and try again\"}", + "failure": [ + { + "failMessage": "failed building config payload: policy bundle compilation failed for d38a8e5d-2312-4046-a60f-a30a4aea1fbb, integrations service returned the following error: missing the specified compiler (4.2.0) please install it and try again", + "name": "ip-192-0-2-243" + } + ], + "pending": [], + "success": [] + }, + "id": "d38a8e5d-2312-4046-a60f-a30a4aea1fbb", + "message": "Instance config failed to publish to", + "status": "failed", + "updateTime": "2023-01-14T04:35:47.566082799Z" +} +``` + +
    diff --git a/content/nim/nginx-app-protect/overview-nap-waf-config-management.md b/content/nim/nginx-app-protect/overview-nap-waf-config-management.md new file mode 100644 index 000000000..d1b7794c2 --- /dev/null +++ b/content/nim/nginx-app-protect/overview-nap-waf-config-management.md @@ -0,0 +1,71 @@ +--- +description: Learn how you can use F5 NGINX Management Suite Instance Manager to configure + NGINX App Protect WAF security policies. +docs: DOCS-992 +doctypes: +- reference +tags: +- docs +title: NGINX App Protect WAF configuration management +toc: true +weight: 500 +--- + +## Overview + +F5 NGINX Management Suite Instance Manager provides configuration management for [NGINX App Protect WAF](https://www.nginx.com/products/nginx-app-protect/web-application-firewall/). + +You can use NGINX App Protect WAF with Instance Manager to inspect incoming traffic, identify potential threats, and block malicious traffic. With Configuration Management for App Protect WAF, you can configure WAF security policies in a single location and push your configurations out to one, some, or all of your NGINX App Protect WAF instances. + +### Features + +- Manage NGINX App Protect WAF security configurations by using the NGINX Management Suite user interface or REST API +- Update Attack Signatures and Threat Campaign packages +- Compile security configurations into a binary bundle for consumption by NGINX App Protect WAF instances +- Provide metrics to the [NGINX Management Suite Security Monitoring]({{< relref "/nms/security/" >}}) module. The Security Monitoring module lets you monitor the security of your applications and APIs and get protection insights that help when analyzing possible threats and tuning security policies. + +## Architecture + +As demonstrated in Figure 1, Instance Manager lets you manage security configurations for NGINX App Protect WAF. 
You can define security policies, upload attack signatures and threat campaign packages, and publish common configurations out to your NGINX App Protect WAF instances. Instance Manager can compile the security configuration into a bundle before pushing the configuration to the NGINX App Protect WAF data plane instances. The NGINX Management Suite Security Monitoring module provides data visualization for NGINX App Protect, so you can monitor, analyze, and refine your policies. + +{{< img src="nim/app-sec-overview.png" caption="Figure 1. NGINX Management Suite with NGINX App Protect Architecture Overview" alt="A diagram showing the architecture of the NGINX Management Suite with NGINX App Protect solution" width="75%">}} + +### Security Bundle Compilation {#security-bundle} + +Instance Manager provides a compiler that can be configured to bundle the complete security configuration -- including JSON security policies, attack signatures, threat campaigns, and log profiles -- into a single binary in `.tgz` format. This bundle is then pushed out to each selected NGINX App Protect WAF instance. + +Performing the security bundle compilation on Instance Manager (precompiled publication) instead of on the NGINX App Protect WAF instances provides the following benefits: + +- Eliminates the need to provision system resources on NGINX App Protect WAF instances to perform compilation. +- The bundles produced by Instance Manager can be reused by multiple NGINX App Protect WAF instances, instead of each instance having to perform the compilation separately. + +However, if you prefer to maintain policy compilation on the NGINX App Protect WAF instance, that is supported with the following limitation: + +- Instance Manager does not publish JSON policies to the NGINX App Protect WAF instance. JSON policies referenced in an NGINX configuration must already exist on the NGINX App Protect WAF instance. 
+ +The example [`location`](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) context below enables NGINX App Protect WAF and tells NGINX where to find the compiled security bundle: + +## Log Profile Compilation + +Instance Manager can also be configured to compile log profiles when you install a new version of the WAF compiler. When you publish an NGINX configuration with the NGINX App Protect [`app_protect_security_log`](https://docs.nginx.com/nginx-app-protect/logging-overview/security-log/#app_protect_security_log) directive, Instance Manager publishes the compiled log profiles to the NGINX App Protect WAF instances when precompiled publication is enabled. + +{{}} +Instance Manager and Security Monitoring both use NGINX App Protect log profiles. The configuration requirements for each are different. When using Instance Manager configuration management, you must reference the log profile in your NGINX configuration using the `.tgz` file extension instead of `.json`. +{{}} + +## Security Management APIs + +By using the Instance Manager REST API, you can automate configuration updates to be pushed out to all of your NGINX App Protect WAF instances. You can use the Instance Manager API to manage and deploy the following security configurations: + +- security policies, +- log profiles, +- attack signatures, and +- threat campaigns. + +Just as with changes made via the user interface, the Instance Manager compiler bundles all of the config updates into a single binary package that you can push out to your instances. Figure 2 shows an overview of the API endpoints available to support security policy configuration and publishing. + +{{< img src="nim/app-sec-api-overview.png" caption="Figure 2. NGINX Management Suite with NGINX App Protect WAF Architecture Overview" alt="A diagram showing the architecture of the NGINX Management Suite with NGINX App Protect solution">}} + +More information is available in the Instance Manager API documentation. 
+ +{{< include "nim/how-to-access-api-docs.md" >}} diff --git a/content/nim/nginx-app-protect/setup-waf-config-management.md b/content/nim/nginx-app-protect/setup-waf-config-management.md new file mode 100644 index 000000000..19983ca37 --- /dev/null +++ b/content/nim/nginx-app-protect/setup-waf-config-management.md @@ -0,0 +1,1200 @@ +--- +description: Learn how to use F5 NGINX Management Suite Instance Manager to secure your + applications with NGINX App Protect WAF security policies. +docs: DOCS-996 +doctypes: +- task +tags: +- docs +title: Manage Your App Protect WAF Configs +toc: true +weight: 100 +--- + +{{< shortversions "2.6.0" "latest" "nimvers" >}} + +## Overview + +Instance Manager helps you manage your F5 NGINX App Protect WAF configurations, making it easy to stay secure. This guide shows you how to set up Instance Manager to configure and manage NGINX App Protect WAF. + +### Before You Begin + +Complete the following prerequisites before proceeding with this guide. + +- You have one or more instances of [NGINX App Protect WAF](https://docs.nginx.com/nginx-app-protect/admin-guide/install/) installed and running. See [Support for NGINX App Protect WAF]({{< relref "tech-specs#support-for-nginx-app-protect-waf" >}}) for a list of supported versions. + + {{}}If you are using configuration management and the NGINX Management Suite Security Monitoring module, follow the instructions in the [setup guide]({{}}) to set up your NGINX App Protect instances before proceeding with this guide.{{}} + +- You have Instance Manager v2.6.0 or later [installed]({{< relref "/nim/deploy/vm-bare-metal/_index.md" >}}), licensed, and running. + If you have a subscription to NGINX App Protect WAF, you can find your Instance Manager license in the subscription details section of [MyF5](https://my.f5.com). 
+ +### Limitations + +{{}}App Protect WAF Config management is currently not supported when [deploying Instance Manager on Kubernetes]({{}}).{{}} + +Instance Manager does not support the following NGINX App Protect features: + +- [Policies with external references](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#external-references) +- [Policies with modifications](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#modifying-configurations) +- Custom signatures + +--- + +## Install the WAF Compiler + +Instance Manager can use the NGINX App Protect WAF compiler to "pre-compile" security configurations before syncing them to managed data plane instances. You'll need to install the WAF compiler package on the NGINX Management Suite host to enable this functionality. If you'll be continuing with WAF compilation on the data plane host, installing the WAF compiler on the NGINX Management Suite host is not necessary. + +Be sure to download and install the correct WAF compiler version for your environment: + +- Each NGINX App Protect version has a corresponding WAF compiler version. You must install the WAF compiler that matches the version of NGINX App Protect that you have running. +- If you have different NGINX App Protect versions running, install the correct WAF compiler package for each on the management plane host. Instance Manager will use the correct WAF compiler for each version to bundle the security configurations. +- You can create [instance groups]({{< relref "/nim/nginx-instances/manage-instance-groups" >}}) to keep track of and manage all instances that have the same version installed. + +For more information about the WAF compiler, refer to the [Security Bundle Compilation]({{< relref "/nim/nginx-app-protect/overview-nap-waf-config-management#security-bundle" >}}) section of the Policy Configuration overview topic. 
+ +### WAF Compiler and Supported App Protect Versions {#nap-waf-compiler-compatibility} + +The following table shows the NGINX App Protect WAF Release version and its corresponding WAF compiler version: + +{{}} + +| NGINX App Protect WAF Release version | WAF Compiler | +|---------------------------------------|----------------------------| +| NGINX App Protect WAF 5.4.0 | nms-nap-compiler-v5.210.0 | +| NGINX App Protect WAF 5.3.0 | nms-nap-compiler-v5.144.0 | +| NGINX App Protect WAF 5.2.0 | nms-nap-compiler-v5.48.0 | +| NGINX App Protect WAF 5.1.0 | nms-nap-compiler-v5.17.0 | +| NGINX App Protect WAF 4.12.0 | nms-nap-compiler-v5.210.0 | +| NGINX App Protect WAF 4.11.0 | nms-nap-compiler-v5.144.0 | +| NGINX App Protect WAF 4.10.0 | nms-nap-compiler-v5.48.0 | +| NGINX App Protect WAF 4.9.0 | nms-nap-compiler-v5.17.0 | +| NGINX App Protect WAF 4.8.1 | nms-nap-compiler-v4.815.0 | +| NGINX App Protect WAF 4.8.0 | nms-nap-compiler-v4.762.0 | +| NGINX App Protect WAF 4.7.0 | nms-nap-compiler-v4.641.0 | +| NGINX App Protect WAF 4.6.0 | nms-nap-compiler-v4.583.0 | +| NGINX App Protect WAF 4.5.0 | nms-nap-compiler-v4.457.0 | +| NGINX App Protect WAF 4.4.0 | nms-nap-compiler-v4.402.0 | +| NGINX App Protect WAF 4.3.0 | nms-nap-compiler-v4.279.0 | +| NGINX App Protect WAF 4.2.0 | nms-nap-compiler-v4.218.0 | +| NGINX App Protect WAF 4.1.0 | nms-nap-compiler-v4.100.1 | +| NGINX App Protect WAF 4.0.0 | nms-nap-compiler-v4.2.0 | +| NGINX App Protect WAF 3.12.2 | nms-nap-compiler-v3.1088.2 | + +{{}} + +
    + +{{}} + +- The install commands in this guide use an example version to show the correct command format. + + Be sure to replace the version string in the example with the correct version to suit your needs. + You can find the package versions in the [NGINX App Protect WAF Release Notes](https://docs.nginx.com/nginx-app-protect/releases/). + +- The WAF compiler installs to the `/opt` directory. Set the permission level for the directory as appropriate to allow write access by the owner (for example, `0755`). + +{{}} + +### Debian or Ubuntu + +Install the WAF compiler, then restart the `nms-integrations` service: + +```bash +sudo apt-get install nms-nap-compiler-v5.210.0 +``` + +{{}} + +- If you want to have more than one version of the `nms-nap-compiler` installed on your system at once, you'll need to append `-o Dpkg::Options::="--force-overwrite"` to the `nms-nap-compiler` installation commands after your initial `nms-nap-compiler` installation. For example, the installation command would look like this: + +```bash +sudo apt-get install nms-nap-compiler-v5.210.0 -o Dpkg::Options::="--force-overwrite" +``` + +{{}} + +### RHEL 8.1 or later + +Download the file dependencies.repo to `/etc/yum.repos.d`, enable the `codeready-builder` repository through subscription manager, and install the WAF compiler package: + +```bash +sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo +sudo subscription-manager repos --enable codeready-builder-for-rhel-8-x86_64-rpms +sudo yum install nms-nap-compiler-v5.210.0 +``` + +### RHEL 7.4 or later; CentOS +Download the file `dependencies.repo` to `/etc/yum.repos.d`, enable the RHEL 7 server repositories, and install the WAF compiler package. 
+ +```bash +sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo +sudo yum-config-manager --enable rhui-REGION-rhel-server-optional rhui-REGION-rhel-server-releases rhel-7-server-optional-rpms +sudo yum install nms-nap-compiler-v5.210.0 +``` + +### Amazon Linux 2 LTS +Download the files `nms-amazon2.repo` and `app-protect-7.repo` to `/etc/yum.repos.d`, enable the `Extra Packages for Enterprise (EPEL)` repository, and install the WAF compiler package: + +```bash +sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nms-amazon2.repo +sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-7.repo +sudo amazon-linux-extras enable epel +sudo yum clean metadata +sudo yum install epel-release +sudo yum install nms-nap-compiler-v5.210.0 +``` + +### Oracle Linux 7.4 or later +Download the file `dependencies.repo` to `/etc/yum.repos.d`, enable the `ol8_codeready_builder` repository, and install the WAF compiler package: + +```bash +sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo +sudo yum-config-manager --enable ol8_codeready_builder +sudo yum install nms-nap-compiler-v5.210.0 +``` + +### Download from MyF5 + +If you are not able to access the public NGINX repository, you can download all of the required packages from [MyF5](https://my.f5.com/). + +Take the steps below to download the WAF compiler, Attack Signatures, and Threat Campaigns packages from MyF5. + +1. Log in to [MyF5](https://my.F5.com). +1. Go to **Resources** > **Downloads**. +1. Select **Group/Product Family**: **NGINX**. +1. Select **Product Line**: **NGINX App Protect**. +1. Select a **Product version**. +1. Select the **Linux distribution**, **distribution version**, and **Architecture**. +1. Download the WAF compiler package and transfer it to the NGINX Management Suite host. +1. Run the appropriate command on the host to install the WAF compiler package from the file. 
+ + - Debian or Ubuntu: + + ``` bash + sudo apt-get install -f /path/to/nms-nap-compiler-_focal_amd64.deb + ``` + + {{}}For Debian or Ubuntu, if you want to have more than one version of the `nms-nap-compiler` installed on your system at once, you'll need to append `-o Dpkg::Options::="--force-overwrite"` to the `nms-nap-compiler` installation commands after your initial `nms-nap-compiler` installation. For example, the installation command would look like this: + +```bash +sudo apt-get install -f /path/to/nms-nap-compiler-_focal_amd64.deb -o Dpkg::Options::="--force-overwrite" +``` + + {{}} + + - RHEL, CentOS, or Oracle Linux: + + ``` bash + sudo yum install -f /path/to/nms-nap-compiler-_el8.ngx.x86_64.rpm + ``` + +### Automatically Download and Install New WAF Compiler + +Once a version of the NGINX App Protect WAF compiler is manually installed on Instance Manager, the system will automatically download and install a new WAF compiler when it detects that an update is required. This typically happens when the NGINX App Protect WAF version on the data plane has been [upgraded](#upgrade-nap-waf-version-on-managed-instances) or when a new data plane with a different NGINX App Protect WAF version is added. + +To enable the automatic download and installation of a new WAF compiler, you need to [upload your NGINX App Protect WAF certificate and key](#upload-nginx-app-protect-waf-certificate-and-key) to Instance Manager. This upload needs to be done only once. By providing the certificate and key, Instance Manager can securely fetch and install the latest WAF compiler from the NGINX repository. + +If the automatic download and install of the new WAF compiler step fails, when publishing the NGINX configuration, the error message + +``` text +missing the specific compiler, please install it and try again. +``` + +will appear. 
This happens if the NGINX App Protect WAF certificate and key are missing or not working, or if Instance Manager cannot connect to the NGINX Repository. Please check `/var/log/nms/nms.log` for errors. + +If you see the following error, your NGINX App Protect WAF certificate and key are missing, invalid, or have expired: + +```text +error when creating the nginx repo retriever - NGINX repo certificates not found +``` + +Also, please refer to [Install the WAF Compiler](#install-the-waf-compiler) for details on how to manually install the new WAF compiler. + +--- + +## Set Up Attack Signatures and Threat Campaigns + +NGINX App Protect provides predefined [Attack Signatures](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#configuring-attack-signatures) to protect your application against all attack types identified by the system. As new Attack Signatures are identified, they will become available for download so that your system will always have the most up-to-date protection. + +[Threat Campaigns](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#threat-campaigns) is a threat intelligence feature included in an NGINX App Protect WAF subscription. The feature includes frequent update feeds containing contextual information about active attack campaigns currently being observed by F5 Threat Labs that NGINX App Protect WAF can provide protection against. Just like Attack Signatures, the Threat Campaign patterns are updated regularly. Unlike Attack Signatures, the NGINX App Protect WAF installation does not include any Threat Campaigns and you need to install them in order for the protection to take effect. Due to the highly dynamic nature of those campaigns the updates are issued far more frequently than the Attack Signatures. You need to install those updates close to the time they are issued in order to get the most effective protection. 
+ +In order to take advantage of new Attack Signature and Threat Campaign packages, you need to upload these packages to NGINX Management Suite. + +You can either configure Instance Manager to download new versions automatically, or manage the files manually by downloading the packages from MyF5 and then uploading them to Instance Manager by using the REST API. + +### Automatically Download Latest Packages {#automatically-download-latest-packages} + +#### Upload NGINX App Protect WAF Certificate and Key + +You will need to use your NGINX repo certificates to setup automatic retrieval of Attack Signatures and Threat Campaigns packages. When you upload your NGINX App Protect WAF certificate and key to NGINX Instance Manager, you are letting NGINX Instance Manager access the NGINX repo to get Attack Signature and Threat Campaign files on your behalf. + +1. Log in to [MyF5](https://account.f5.com/myf5) and go to **My Products and Plans > Subscriptions** to download the SSL certificate (*nginx-repo.crt*) and private key (*nginx-repo.key*) for your NGINX App Protect subscription. +1. Create a JSON file, similar to the example below, that contains the text from your crt and key files. + You will need to replace all of the newlines in the crt and key output with `\n`. + + ```json + { + "name": "nginx-repo", + "nginxResourceType": "NginxRepo", + "certPEMDetails": { + "caCerts": [], + "password": "", + "privateKey": "-----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDt71QRqMl/7/rv\n[CONTENT SNIPPED]\n-----END PRIVATE KEY-----\n", + "publicCert": "-----BEGIN CERTIFICATE-----\nMIIEPzCCAyegAwIBAgIRANydR2VZ+mlt75SkttRyQTkwDQYJKoZIhvcNAQELBQAw\n[CONTENT SNIPPED]\n-----END CERTIFICATE-----", + "type": "PEM" + } + } + ``` + +1. Send an HTTP POST request to the [Instance Manager REST API]({{< relref "/nim/fundamentals/api-overview" >}}) to upload the repo certificate and key. + +
    + Example request + + ```shell + curl -X POST 'https://{{NMS_FQDN}}//api/platform/v1/certs' \ + --header "Authorization: Bearer " \ + --header "Content-Type: application/json" \ + -d@nginx-repo-certs.json + ``` + +
    + Example response + + ```json + { + "certAssignmentDetails": [], + "certMetadata": [ + { + "authorityKeyIdentifier": "", + "commonName": "", + "expired": false, + "expiry": 59789838, + "issuer": "C=US, ST=Washington, L=Seattle, Inc., O=F5 Networks\\, OU=Certificate Authority, CN=F5 PRD Issuing Certificate Authority TEEM V1", + "publicKeyType": "RSA (2048 bit)", + "serialNumber": "", + "signatureAlgorithm": "SHA256-RSA", + "subject": "CN=", + "subjectAlternativeName": "", + "subjectKeyIdentifier": "", + "thumbprint": "", + "thumbprintAlgorithm": "SHA256-RSA", + "validFrom": "2021-12-21T16:57:55Z", + "validTo": "2024-12-20T00:00:00Z", + "version": 3 + } + ], + "certPEMDetails": { + "caCerts": [], + "password": "**********", + "privateKey": "**********", + "publicCert": "[CONTENT SNIPPED]", + "type": "PEM" + }, + "created": "2023-01-27T23:42:41.587760092Z", + "modified": "2023-01-27T23:42:41.587760092Z", + "name": "nginx-repo", + "serialNumber": "", + "uid": "d08d9f54-58dd-447a-a71d-6fa5aa0d880c", + "validFrom": "2021-12-21T16:57:55Z", + "validTo": "2024-12-20T00:00:00Z" + } + ``` + +#### Enable automatic downloads + +Instance Manager can automatically download the latest Attack Signature and Threat Campaign versions. To enable automatic downloads, take the steps below. + +1. Log in to the management plane host using SSH. +1. Open the `/etc/nms/nms.conf` file for editing. +1. Adjust the `app_protect_security_update` options, shown in the example below, to enable and configure automatic downloads: + + ``` yaml + integrations: + # enable this for integrations on tcp + # address: 127.0.0.1:8037 + address: unix:/var/run/nms/integrations.sock + # Dqlite config + dqlite: + addr: 127.0.0.1:7892 + app_protect_security_update: + # Enable this setting to automatically retrieve the latest Attack Signatures and Threat Campaigns. + enable: true + # Enable this setting to specify how often, in hours, the latest Attack Signatures and Threat Campaigns are retrieved. 
+ # The default interval is 6 hours, the maximum interval is 48 hours, and the minimum is 1 hour. + interval: 6 + # Enable this setting to specify how many updates to download for the latest Attack Signatures and Threat Campaigns. + # By default, the 10 latest updates are downloaded. The maximum value is 20, and the minimum value is 1. + number_of_updates: 10 + ``` + +1. Save the changes and close the file. +1. Restart the `nms-integrations` service: + + ``` bash + sudo systemctl restart nms-integrations + ``` + +If you do not see the latest Attack Signatures and Threat Campaigns packages downloaded as expected, please check `/var/log/nms/nms.log` for errors. + +If you see the following error, your NGINX App Protect WAF certificate and key are invalid or have expired: + +```text +error when creating the nginx repo retriever - NGINX repo certificates not found +``` + +### Manually Update Packages + +#### Download packages from MyF5 + +1. Log in to [MyF5](https://my.f5.com), then go to **Resources** > **Downloads**. +1. Select **Group/Product Family**: **NGINX**. +1. Select **Product Line**: **NGINX App Protect**. +1. Select a **Product version** (a version that matches your WAF compiler version). +1. Select your **Linux Distribution**, **Version** and **Architecture**. +1. Find and download the deb or rpm package starting with "app-protect-attack-signatures" for Attack Signatures and "app-protect-threat-campaigns" for Threat Campaigns. +1. Using utilities such as *az* or *rpm2cpio|cpio* to extract the following 3 files from the package: + - `signatures.bin.tgz` for Attack Signatures or `threat_campaigns.bin.tgz` for Threat Campaigns + - `signature_update.yaml` for Attack Signatures or `threat_campaign_update.yaml`for Threat Campaigns + - `version` + +1. Using a file archive utility, such as *tar*, bundle the three files into a single `.tgz` file. 
For example: + + ```shell + tar –czvf attack-signatures.tgz signatures.bin.tgz signature_update.yaml version + ``` + +#### Upload packages to Instance Manager + +You will need to use the [Instance Manager REST API]({{< relref "/nim/fundamentals/api-overview" >}}) to upload the bundled Attack Signatures and Threat Campaigns. + +
    +Attack Signatures Example + +```shell +curl -X POST 'https://{{NMS_FQDN}}//api/platform/v1/security/attack-signatures' \ + --header "Authorization: Bearer " \ + --form 'revisionTimestamp="2022.11.16"' \ + --form 'filename=@"/attack-signatures.tgz"' +``` + +
    + +
    +Threat Campaigns Example + +```shell +curl -X POST 'https://{{NMS_FQDN}}//api/platform/v1/security/threat-campaigns' \ + --header "Authorization: Bearer " \ + --form 'revisionTimestamp="2022.11.15"' \ + --form 'filename=@"/threat-campaigns.tgz"' +``` + +
    + +{{}}The bundle you upload for Attack Signatures or Threat Campaigns must correspond to the OS of your Instance Manager. For example, if your Instance Manager is running on ubuntu-20.04, then the bundle you upload for Attack Signatures or Threat Campaigns needs to be created from the ubuntu-20.04 packages.{{}} + +### Update the Security Monitoring Signature Database + +The Security Monitoring module's analytics dashboards make use of a Signature Database to provide more information on Attack Signatures that have triggered Security Violations, such as the Signature's name, accuracy, and risk level. + +To ensure that the dashboards show the most up-to-date information, you need to [update the Security Monitoring Signature Database]({{< relref "/nim/monitoring/security-monitoring/configure/update-signatures" >}}) + +--- + +## Setup Compiler Resource Pruning +You can configure the following compiler resources to prune automatically: + +- Compiled Security Policies +- Compiled Security Log Profiles +- Attack Signatures +- Threat Campaigns + +In the case of `compiled security policies` and `compiled security log profiles`, the definition of the `security policy` and/or `security log profile` is not removed. Only the compiled bundles associated with those resources are removed. + +To enable automatic compiler resource pruning, please follow these steps: + +1. Log in to the management plane host using SSH. +1. Open the `/etc/nms/nms.conf` file for editing. +1. Update the `policy_manager` field to contain the desired `time to live` values for each resource type; see the following snippet for an example of adding the necessary fields under `integrations`->`policy_manager`: + + ```yaml + integrations: + address: unix:/var/run/nms/integrations.sock + dqlite: + addr: 127.0.0.1:7892 + policy_manager: + # Time to live for attack signatures. 
If the attack signatures exceed their TTL and are not deployed to an instance or + # instance group they will be deleted from the database. Duration unit can be seconds (s), minutes (m), or hours (h). + attack_signatures_ttl: 336h + # Time to live for compiled bundles, this includes compiled security policies and compiled log profiles. If a compiled + # bundle exceeds its TTL and is not deployed to an instance or instance group it will be deleted from the database. Note + # that the compiled bundle is deleted, not the definition of it (i.e. the security policy or log profile definition). + # Duration unit can be seconds (s), minutes (m), or hours (h). + compiled_bundles_ttl: 336h + # Time to live for threat campaigns. If the threat campaigns exceed their TTL and are not deployed to an instance or + # instance group they will be deleted from the database. Duration unit can be seconds (s), minutes (m), or hours (h). + threat_campaigns_ttl: 1440h + app_protect_security_update: + enable: true + interval: 6 + number_of_updates: 10 + ``` + +1. Save the changes and close the file. +1. Restart the `nms-integrations` service: + + ``` bash + sudo systemctl restart nms-integrations + ``` + +The compiler resource pruning process occurs once upon start-up of the `nms-integrations` service and once every `24 hours` after the `nms-integrations` service has been started. + +--- + +## Onboard NGINX App Protect WAF Instances + +To onboard your NGINX App Protect WAF instances to Instance Manager, you need to install and configure NGINX Agent. + +### Install NGINX Agent + +1. Use SSH to connect to the NGINX App Protect WAF instance. Take the steps below for each instance to download and install NGINX Agent from the management plane host. + +1. Download the NGINX Agent package from the NGINX Management Suite host and run the agent install script. 
+ + {{< tip >}}You can add instances with the same version of NGINX App Protect installed to an instance group by running the agent install command on each instance with the optional `--instance-group`` flag.{{< /tip>}} + {{< include "agent/installation/install-agent-api.md" >}} + +### Configure NGINX Agent + +1. Edit the NGINX Agent configuration file (`/etc/nginx-agent/nginx-agent.conf`) to allow access to the `/etc/app-protect` directory and enable reporting. + + - The agent needs access to any directories where NGINX App Protect configuration files are stored on the data plane host. + - The `report_interval` is the length of time the agent waits between checks for changes to NGINX App Protect WAF configurations. + - `precompiled_publication` enables the publication of precompiled NGINX App Protect security policies and log profiles. + + ```yaml + ... + config_dirs: "/etc/nginx:/usr/local/etc/nginx:/usr/share/nginx/modules:/etc/nms:/etc/app_protect" + extensions: + - nginx-app-protect + nginx_app_protect: + report_interval: 15s + precompiled_publication: true + ... + ``` + + {{}}You can use the NGINX Agent installation script to add these fields: + +```bash +# Download install script via API +curl https:///install/nginx-agent > install.sh + +# Specify the -m | --nginx-app-protect-mode flag to set up management of NGINX App Protect on +# the instance. In the example below we specify 'precompiled-publication' for the flag value +# which will make the config field 'precompiled_publication' set to 'true', if you would like to +# set the config field 'precompiled_publication' to 'false' you can specify 'none' as the flag value. +sudo sh ./install.sh --nginx-app-protect-mode precompiled-publication +``` + + {{}} + +1. Restart NGINX Agent. + + ``` bash + sudo systemctl restart nginx-agent + ``` + +### Verify Installation + +{{}} + +{{%tab name="UI"%}} + +You should now be able to view your NGINX App Protect WAF instances in the Instance Manager user interface. 
Take the steps below to verify that NGINX Agent is installed and reporting data to Instance Manager. + +1. {{< include "nim/webui-nim-login.md" >}} +1. Select **Instances**. +1. You should see the installed version listed in the **NGINX App Protect** column. +1. Select the instance, then scroll to the **App Protect Details** section. There, you should see the "App Protect WAF" status and "Build" should match the version installed on the instance. + +{{%/tab%}} + +{{%tab name="API"%}} + +{{< see-also >}}{{< include "nim/how-to-access-nim-api.md" >}}{{< /see-also >}} + +You can query the Instance Manager REST API to verify the following information: + +- NGINX App Protect WAF version +- NGINX App Protect WAF running status +- Total number of instances with NGINX App Protect WAF installed, out of the total number of NGINX instances + + +{{}} + +| Method | Endpoint | +|--------|------------------------------| +| GET | `/api/platform/v1/instances` | +| GET | `/api/platform/v1/systems` | + +{{}} + + +- Send an HTTP `GET` request to the `/api/platform/v1/systems` endpoint to find out what version of NGINX App Protect is running. This response will also show the Threat Campaign and Attack Signature package versions running on each instance. + +
    + JSON response + + ```json + { + "count": 3, + "items": [ + [...] + "appProtect": { + "attackSignatureVersion": "2022.11.16", + "status": "active", + "threatCampaignVersion": "2022.11.15", + "version": "build-3.954.0" + }, + [...] + ] + } + ``` + +
    + +- Send an HTTP `GET` request to the `/api/platform/v1/instances` endpoint to find out the number of instances with NGINX App Protect WAF installed. The total count will be in the `nginxAppProtectWAFCount` field in the response. + + For example: + +
    + JSON response + + ```json + { + "count": 3, + "items": [ + [...] + ], + "nginxAppProtectWAFCount": 2, + "nginxPlusCount": 3 + } + ``` + +
    + +
    + +{{%/tab%}} +{{
    }} + +### Configure Docker Compose for NGINX App Protect WAF Version 5 + +Version 5 of NGINX App Protect WAF provides a container-based architecture that requires some configuration changes to operate with Instance Manager. + +1. Edit the `docker-compose.yaml` you created according to [NGINX App Protect WAF](https://docs.nginx.com/nginx-app-protect-waf/v5/admin-guide/install/) to provide the containers with read access to the policies and log profiles written to the instance by Instance Manager. + + - Add the line `user: 101:nginx-agent-group` to each service, where `nginx-agent-group` is the ID of the NGINX Agent group. The value of this group ID can be determined with + + ```bash + cat /etc/group + ``` + + - Add the directory `/etc/nms` to the volume maps for both services + + For example: + + ```yaml + version: "3.9" + + services: + waf-enforcer: + container_name: waf-enforcer + image: private-registry.nginx.com/nap/waf-enforcer:5.2.0 + user: 101:1002 + environment: + - ENFORCER_PORT=50000 + ports: + - "50000:50000" + volumes: + - /opt/app_protect/bd_config:/opt/app_protect/bd_config + - /etc/nms:/etc/nms + networks: + - waf_network + restart: always + + waf-config-mgr: + container_name: waf-config-mgr + image: private-registry.nginx.com/nap/waf-config-mgr:5.2.0 + user: 101:1002 + volumes: + - /opt/app_protect/bd_config:/opt/app_protect/bd_config + - /opt/app_protect/config:/opt/app_protect/config + - /etc/app_protect/conf:/etc/app_protect/conf + - /etc/nms:/etc/nms + restart: always + network_mode: none + depends_on: + waf-enforcer: + condition: service_started + + networks: + waf_network: + driver: bridge + ``` + +1. 
Restart the containers: + + ``` bash + docker compose restart + ``` + +--- + +## Onboard Security Policies + +Instance Manager provides the same [default security policies](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#policy-configuration) as NGINX App Protect WAF: + +- **NGINX Default Policy**: provides [OWASP Top 10](https://owasp.org/www-project-top-ten/) and Bot security protection. +- **NGINX Strict Policy**: contains more restrictive criteria for blocking traffic than the default policy. + +If you want to use the out-of-the-box policies, you can proceed to the next section: [Add WAF configuration to NGINX Instances](#add-waf-config). + +Continue in this section if you have custom security policies that you want to upload to Instance Manager. + +### Upload Custom Security Policies + +If you onboarded NGINX App Protect WAF instances with existing security configurations, you'll need to use the Instance Manager REST API to onboard the security policies referenced in the `nginx.conf` on your NGINX App Protect instances. + +To do so, take the steps below for each policy: + +1. Use `base64` to encode the JSON policy. + + For example: + + ``` bash + base64 -i ./ignore-xss-policy-example.json + ``` + +
    + ignore-xss-policy-example.json + + ```json + { + "policy": { + "name": "ignore-xss", + "template": { + "name": "POLICY_TEMPLATE_NGINX_BASE" + }, + "applicationLanguage": "utf-8", + "enforcementMode": "blocking", + "signatures": [ + { + "signatureId": 200001475, + "enabled": false + }, + { + "signatureId": 200000098, + "enabled": false + }, + { + "signatureId": 200001148, + "enabled": false + }, + { + "signatureId": 200001480, + "enabled": false + }, + { + "signatureId": 200001088, + "enabled": false + } + ], + "bot-defense": { + "settings": { + "isEnabled": false + } + }, + "headers": [ + { + "name": "*", + "type": "wildcard", + "decodeValueAsBase64": "disabled" + }, + { + "name": "*-bin", + "type": "wildcard", + "decodeValueAsBase64": "required" + }, + { + "name": "Referer", + "type": "explicit", + "decodeValueAsBase64": "disabled" + }, + { + "name": "Authorization", + "type": "explicit", + "decodeValueAsBase64": "disabled" + }, + { + "name": "Transfer-Encoding", + "type": "explicit", + "decodeValueAsBase64": "disabled" + } + ], + "cookies": [ + { + "name": "*", + "type": "wildcard", + "decodeValueAsBase64": "disabled" + } + ], + "parameters": [ + { + "name": "*", + "type": "wildcard", + "decodeValueAsBase64": "disabled" + } + ] + } + } + ``` + +
    + +1. Create a JSON request body that contains the encoded policy. + + For example: + + ```json + { + "metadata": { + "name": "ignore-xss-example", + "displayName": "Ignore Cross Site Scripting example", + "description": "Security policy that intentionally ignores cross site scripting" + }, + "content": + "ewogICAgInBvbGljeSI6IHsKICAgICAgICAibmFtZSI6ICJpZ25vcmUteHNzIiwKICAgICAgICAidGVtcGxhdGUiOiB7CiAgICAgICAgICAgICJuYW1lIjogIlBPTElDWV9URU1QTEFURV9OR0lOWF9CQVNFIgogICAgICAgIH0sCiAgICAgICAgImFwcGxpY2F0aW9uTGFuZ3VhZ2UiOiAidXRmLTgiLAogICAgICAgICJlbmZvcmNlbWVudE1vZGUiOiAiYmxvY2tpbmciLAogICAgICAgICJzaWduYXR1cmVzIjogWwogICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAic2lnbmF0dXJlSWQiOiAyMDAwMDE0NzUsCiAgICAgICAgICAgICAgICAiZW5hYmxlZCI6IGZhbHNlCiAgICAgICAgICAgIH0sCiAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICJzaWduYXR1cmVJZCI6IDIwMDAwMDA5OCwKICAgICAgICAgICAgICAgICJlbmFibGVkIjogZmFsc2UKICAgICAgICAgICAgfSwKICAgICAgICAgICAgewogICAgICAgICAgICAgICAgInNpZ25hdHVyZUlkIjogMjAwMDAxMTQ4LAogICAgICAgICAgICAgICAgImVuYWJsZWQiOiBmYWxzZQogICAgICAgICAgICB9LAogICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAic2lnbmF0dXJlSWQiOiAyMDAwMDE0ODAsCiAgICAgICAgICAgICAgICAiZW5hYmxlZCI6IGZhbHNlCiAgICAgICAgICAgIH0sCiAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICJzaWduYXR1cmVJZCI6IDIwMDAwMTA4OCwKICAgICAgICAgICAgICAgICJlbmFibGVkIjogZmFsc2UKICAgICAgICAgICAgfQogICAgICAgIF0sCiAgICAgICAgImJvdC1kZWZlbnNlIjogewogICAgICAgICAgICAic2V0dGluZ3MiOiB7CiAgICAgICAgICAgICAgICAiaXNFbmFibGVkIjogZmFsc2UKICAgICAgICAgICAgfQogICAgICAgIH0sCiAgICAgICAgImhlYWRlcnMiOiBbCiAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICJuYW1lIjogIioiLAogICAgICAgICAgICAgICAgInR5cGUiOiAid2lsZGNhcmQiLAogICAgICAgICAgICAgICAgImRlY29kZVZhbHVlQXNCYXNlNjQiOiAiZGlzYWJsZWQiCiAgICAgICAgICAgIH0sCiAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICJuYW1lIjogIiotYmluIiwKICAgICAgICAgICAgICAgICJ0eXBlIjogIndpbGRjYXJkIiwKICAgICAgICAgICAgICAgICJkZWNvZGVWYWx1ZUFzQmFzZTY0IjogInJlcXVpcmVkIgogICAgICAgICAgICB9LAogICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAibmFtZSI6ICJSZWZlcmVyIiwKICAgICAgICAgICAgICAgICJ0eXBlIjogImV4cGxp
Y2l0IiwKICAgICAgICAgICAgICAgICJkZWNvZGVWYWx1ZUFzQmFzZTY0IjogImRpc2FibGVkIgogICAgICAgICAgICB9LAogICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAibmFtZSI6ICJBdXRob3JpemF0aW9uIiwKICAgICAgICAgICAgICAgICJ0eXBlIjogImV4cGxpY2l0IiwKICAgICAgICAgICAgICAgICJkZWNvZGVWYWx1ZUFzQmFzZTY0IjogImRpc2FibGVkIgogICAgICAgICAgICB9LAogICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAibmFtZSI6ICJUcmFuc2Zlci1FbmNvZGluZyIsCiAgICAgICAgICAgICAgICAidHlwZSI6ICJleHBsaWNpdCIsCiAgICAgICAgICAgICAgICAiZGVjb2RlVmFsdWVBc0Jhc2U2NCI6ICJkaXNhYmxlZCIKICAgICAgICAgICAgfQogICAgICAgIF0sCiAgICAgICAgImNvb2tpZXMiOiBbCiAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICJuYW1lIjogIioiLAogICAgICAgICAgICAgICAgInR5cGUiOiAid2lsZGNhcmQiLAogICAgICAgICAgICAgICAgImRlY29kZVZhbHVlQXNCYXNlNjQiOiAiZGlzYWJsZWQiCiAgICAgICAgICAgIH0KICAgICAgICBdLAogICAgICAgICJwYXJhbWV0ZXJzIjogWwogICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAibmFtZSI6ICIqIiwKICAgICAgICAgICAgICAgICJ0eXBlIjogIndpbGRjYXJkIiwKICAgICAgICAgICAgICAgICJkZWNvZGVWYWx1ZUFzQmFzZTY0IjogImRpc2FibGVkIgogICAgICAgICAgICB9CiAgICAgICAgXQogICAgfQp9CiAgICAgICAgCg==" + } + ``` + +1. Send an HTTP `POST` request to the `/api/platform/v1/security/policies` endpoint to create the policy on Instance Manager. 
+
+   For example:
+
+   ```shell
+   curl -X POST https://{{NMS_FQDN}}/api/platform/v1/security/policies \
+       -H "Authorization: Bearer " -H "Content-Type: application/json" \
+       -d '{"content": "ewogICAgInBvbGljeSI6[CONTENT_SNIPPED]QogICAgfQp9CiAgICAgICAgCg==", \
+       "metadata": {"description": "Ignore cross-site scripting is a security policy that intentionally ignores cross site scripting.", \
+       "displayName": "Ignore cross-site scripting example", "name": "ignore-xss-example"}}'
+   ```
+
+   You should receive a success response similar to the example below:
+
+   ```json
+   {
+     "metadata": {
+       "created": "2022-12-16T03:41:53.516Z",
+       "description": "Security policy that intentionally ignores cross site scripting",
+       "displayName": "Ignore Cross Site Scripting example",
+       "modified": "2022-12-16T03:47:34.465920964Z",
+       "name": "ignore-xss-example",
+       "revisionTimestamp": "2022-12-16T03:41:53.516Z",
+       "uid": "23139e0a-4ac8-49f9-b7a0-0577b42c70c7"
+     },
+     "selfLink": {
+       "rel": "/api/platform/v1/security/policies/23139e0a-4ac8-49f9-b7a0-0577b42c70c7"
+     }
+   }
+   ```
+
+1. Verify that your policies have been onboarded by sending an HTTP `GET` request to the `/api/platform/v1/security/policies` endpoint:
+
+   ```shell
+   curl -X GET https://{{NMS_FQDN}}/api/platform/v1/security/policies \
+       -H "Authorization: Bearer "
+   ```
+
+   You should receive a success response similar to the example below:
+
    + Example response + + ```json + { + "items": [ + { + "content": "", + "metadata": { + "created": "2022-12-14T00:04:07.646Z", + "description": "The default policy provides OWASP Top 10 and Bot security protection", + "displayName": "NGINX Default Policy", + "modified": "2022-12-14T00:04:07.646Z", + "name": "NginxDefaultPolicy", + "revisionTimestamp": "2022-12-14T00:04:07.646Z", + "uid": "ae7d2ffc-972d-4951-a7ba-2340e1b8fe1c" + } + }, + { + "content": "", + "metadata": { + "created": "2022-12-14T00:04:07.65Z", + "description": "The strict policy contains more restrictive criteria for blocking traffic than the default policy", + "displayName": "NGINX Strict Policy", + "modified": "2022-12-14T00:04:07.65Z", + "name": "NginxStrictPolicy", + "revisionTimestamp": "2022-12-14T00:04:07.65Z", + "uid": "94665634-0d7e-4b72-87e8-491d951c8510" + } + }, + { + "content": "", + "metadata": { + "created": "2022-12-16T03:41:53.516Z", + "description": "Security policy that intentionally ignores cross site scripting", + "displayName": "Ignore Cross Site Scripting example", + "modified": "2022-12-16T03:47:34Z", + "name": "ignore-xss-example", + "revisionTimestamp": "2022-12-16T03:41:53.516Z", + "uid": "23139e0a-4ac8-49f9-b7a0-0577b42c70c7" + } + } + ] + } + ``` + +
    +
+
+---
+
+## Add WAF Configuration to NGINX Instances {#add-waf-config}
+
+The [NGINX App Protect WAF Configuration Guide](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#policy-configuration-overview) provides information about how and where to add the directives that allow you to add security to your instances. Instance Manager ships with the same reference policies as NGINX App Protect WAF:
+
+- NGINX Default Policy (`NginxDefaultPolicy.tgz`): Provides OWASP Top 10 and Bot security protection out of the box.
+- NGINX Strict Policy (`NginxStrictPolicy.tgz`): Contains more restrictive criteria for blocking traffic than the default policy, with a higher risk of false positives.
+
+You can use either of these policies as-is. Many users treat the reference policy as a starting point and customize it to suit the needs of their applications. The Security Monitoring dashboards provide insights that can help you fine-tune your security policies.
+
+When using Instance Manager to manage your WAF configuration, keep the following in mind:
+
+- Instance Manager can compile JSON security policies into a `.tgz` bundle.
+- You can reference custom policy files using the `app_protect_policy_file` directive.
+
+   {{}}If you already have JSON security policies referenced in your NGINX configuration, you can keep them as-is if precompiled publication is not enabled in the NGINX Agent. However, you'll need to change the file extension for the referenced files from `.json` to `.tgz` if precompiled publication is enabled. The file name for the compiled bundle will be the same as the original file. Instance Manager does not support both `.json` and `.tgz` files within a single NGINX configuration.{{}}
+
+- If you are using custom policies, be sure NGINX Agent has permission to access the location(s) where the policies are stored on the data plane.
To do so, edit the NGINX Agent configuration file on the data plane host and add the custom file path to the `config_dirs` setting. + +- Instance Manager uses the NGINX App Protect WAF [default log profiles](https://docs.nginx.com/nginx-app-protect/logging-overview/security-log/#default-logging-content). You can specify the desired log profile by using the `app_protect_security_log` directive. +Instance Manager does not support the use of custom log profiles. + +The examples in this guide use the default path for NGINX App Protect configuration files. If you have these files stored elsewhere on your data plane instance(s), be sure to use the correct file path when setting up your configuration. + +### Edit the NGINX configuration {#update-nginx-conf} + +By using the Instance Manager web interface or REST API, add the NGINX App Protect WAF configurations to the appropriate context in your NGINX configuration. + +The example below shows the directives added to a `location` block: + +```nginx +... +server { + ... + + location / { + ##Enable NGINX App Protect + app_protect_enable on; + ## Reference to a custom security policy bundle + app_protect_policy_file /etc/nms/ignore-xss.tgz; + ## enable logging + app_protect_security_log_enable on; + ## Reference to the log profile bundle + app_protect_security_log /etc/nms/log-default.tgz; + ... +} +``` + +{{< note >}}If you're using the NGINX Management Suite [Security Monitoring module]({{< relref "/nms/security/" >}}), you should already have the `app_protect_security_log` directive set to reference the `secops_dashboard.tgz` file as shown below. Do not change this setting. + +```nginx +app_protect_security_log "/etc/nms/secops_dashboard.tgz" syslog:server=127.0.0.1:514; +``` + +Refer to the [Security Monitoring setup guide]({{< relref "/nim/monitoring/security-monitoring/configure/set-up-app-protect-instances" >}}) to learn more. 
{{}} + +{{}} +NGINX configuration for NGINX App Protect Version 5 requires the following changes: + +- The `app_protect_enforcer_address` directive must be included within the `http` context of the NGINX configuration: + + ```nginx + app_protect_enforcer_address 127.0.0.1:50000; + ``` + +- JSON policies and log profiles are not supported for Version 5, so all policies and log profiles must be precompiled and the `precompiled_publication` attribute in the NGINX Agent configuration must be set to `true`. + +Refer to the [NGINX App Protect WAF Configuration Guide](https://docs.nginx.com/nginx-app-protect-waf/v5/configuration-guide/configuration/) to learn more. +{{}} + +Additional example configurations tailored for NGINX features can be found in the [NGINX App Protect WAF Configuration Guide](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#interaction-with-nginx-features). + +
    + +{{}} +{{%tab name="UI"%}} + +1. {{< include "nim/webui-nim-login.md" >}} +2. On the left menu, select **Instances** or **Instance Groups**. +3. Select **Edit Config** from the **Actions** menu (represented by an ellipsis, `...`) for the desired instance or instance group. +4. If precompiled publication is enabled, change the file extension from `.json` to `.tgz` if there are any referenced security policies in the file. +5. If you want to apply the default security policy, select **Apply Security** and then copy the desired policy snippet by selecting **Copy**. +6. Paste the snippet into an `http`, `server`, or `location` context in the configuration. + + If multiple policies have been published, the most granular policy will apply. + +7. Select **Publish** to immediately push the updated configuration to the selected instance or instance group. + +{{%/tab%}} +{{%tab name="API"%}} + +{{< see-also >}}{{< include "nim/how-to-access-nim-api.md" >}}{{< /see-also >}} + + +{{}} + +| Method | Endpoint | +|--------|---------------------------------------------------------------------| +| GET | `/api/platform/v1/systems/{systemUID}/instances` | +| POST | `/api/platform/v1/security/{systemUID}/instances/{nginxUID}/config` | + +{{}} + + +1. Send an HTTP `GET` request to the `/api/platform/v1/systems/{systemUID}/instances` endpoint. This returns a list of all instances from which you can find the unique identifier (UID) of the instance that you want to update. +1. Add the desired configurations to your `nginx.conf` file, or to any other configuration file that's within the context defined in the NGINX Agent `config_dirs` setting. + + - At a minimum, you should add `app_protect_enable on;`. + - If precompiled publication is enabled, change the file extension from `.json` to `.tgz` if there are any referenced security policies in the file. 
+
+   - If you'd like to use the default security policies, paste either of the policy snippets below into an `http`, `server`, or `location` context in the configuration file. The most granular policy will be applied if multiple policies have been published.
+
+   ``` nginx
+   app_protect_policy_file /etc/nms/NginxDefaultPolicy.tgz;
+   app_protect_policy_file /etc/nms/NginxStrictPolicy.tgz;
+   ```
+
+1. Encode the configuration file using base64:
+
+   ``` bash
+   base64 -i /etc/nginx/nginx.conf
+   ```
+
+1. Provide the encoded string in `configFiles.files.contents`, as shown below.
+
+   > **Important!** Before deploying an updated configuration to an instance group, ensure that all instances in the group have the same version of NGINX App Protect WAF installed. Otherwise, the deployment may fail.
+
+   ```shell
+   curl -X POST https://{{NMS_FQDN}}/api/platform/v1/systems/{systemUID}/instances/{nginxUID}/config -H "Authorization: Bearer " \
+   -H "Content-Type: application/json" -d @
+   ```
+
    + JSON Response + + ```json + { + "auxFiles": { + "files": [ + { + "contents": "PCFET0NUWVBFIGh0bWw+CjxodG1sPgo8aGVhZD4KP", + "name": "/var/www/html/index.nginx-debian.html" + } + ], + + "rootDir": "/" + }, + + "configFiles": { + "files": [ + { + "contents": "dXNlciB3d3ctZGF0YTsKd29ya2VyX3Byb2Nlc3Nlc", + "name": "nginx.conf" + } + ], + + "rootDir": "/etc/nginx" + }, + + "configUID": "", + "ignoreConflict": false, + "validateConfig": true + } + ``` + +
    + +{{%/tab%}} + +{{
    }} + +### Verify Configuration + +Once you have added the NGINX App Protect WAF directives to your NGINX configuration, you should see the NGINX App Protect WAF status reported as "Active". Take the steps below to verify the configuration in the Instance Manager web interface. + +1. {{< include "nim/webui-nim-login.md" >}} +1. Select **Instances**. +1. You should see the installed version listed in the **NGINX App Protect** column. +1. Select the instance, then scroll to the **App Protect Details** section. There, you should see the "App Protect WAF" status is "Active". The "Build" should match the version installed on the instance. + +--- + +## Troubleshooting + +If you're having issues with NGINX App Protect WAF, we suggest trying the following troubleshooting steps. If none of them helps, please reach out to NGINX Customer Support for further assistance. + +
    +Verify that NGINX App Protect WAF is not installed on the NGINX Management Suite host
+
+To ensure no library conflicts arise when installing `nms-nap-compiler`, verify that NGINX App Protect WAF is not installed on the NGINX Management Suite host. You can do this by taking the following steps:
+
+1. Open an SSH connection to your NGINX Management Suite host and log in.
+2. Run the following command:
+
+   - Debian-based distributions, run `dpkg -s app-protect`
+   - RPM-based distributions, run `rpm -qa | grep app-protect`
+
+If NGINX App Protect WAF is installed, you'll need to [uninstall it](https://docs.nginx.com/nginx-app-protect-waf/admin-guide/install/#uninstall-app-protect).
+
    + +
    +Verify the WAF compiler version and NGINX App Protect version match + +Each NGINX App Protect WAF version has a corresponding version of the WAF compiler. You must install the correct WAF compiler version for the version of NGINX App Protect WAF running on the managed instance(s). Refer to [WAF Compiler and Supported App Protect Versions]( #nap-waf-compiler-compatibility) for compatibility details. + +To view the installed version of the WAF compiler: + +1. Open an SSH connection to your NGINX Management Suite host and log in. +2. Run the following command: + + ```shell + ls -l /opt/nms-nap-compiler + ``` + +
    + +
    +Verify the WAF compiler is running properly + +Check if the WAF compiler has been installed and is working properly by viewing the command-line help: + +```bash +sudo /opt/nms-nap-compiler/app_protect-/bin/apcompile -h +``` + +For example, to view the help description for WAF compiler 5.210.0, run the following command: + +``` bash +sudo /opt/nms-nap-compiler/app_protect-5.210.0/bin/apcompile -h +``` + +The output looks similar to the following example: + +```text +USAGE: + /opt/nms-nap-compiler/app_protect-5.210.0/bin/apcompile + +Examples: + /opt/nms-nap-compiler/app_protect-5.210.0/bin/apcompile -p /path/to/policy.json -o mypolicy.tgz + /opt/nms-nap-compiler/app_protect-5.210.0/bin/apcompile -p policyA.json -g myglobal.json -o /path/to/policyA_bundle.tgz + /opt/nms-nap-compiler/app_protect-5.210.0/bin/apcompile -g myglobalsettings.json --global-state-outfile /path/to/myglobalstate.tgz + /opt/nms-nap-compiler/app_protect-5.210.0/bin/apcompile -b /path/to/policy_bundle.tgz --dump + /opt/nms-nap-compiler/app_protect-5.210.0/bin/apcompile -l logprofA.json -o /path/to/logprofA_bundle.tgz +... +``` + +
    + +
    + +
    +Verify NGINX Agent configuration on NGINX App Protect WAF instance + +Configure NGINX Agent on your NGINX App Protect WAF instance with settings similar to the following example: + +"/etc/nginx-agent/nginx-agent.conf" + +```yaml +# path to aux file dirs can also be added +config_dirs: "/etc/nginx:/usr/local/etc/nginx:/usr/share/nginx/modules:/etc/nms:/etc/app_protect" + +# Enable necessary NAP extensions +extensions: + - nginx-app-protect + - nap-monitoring + +nginx_app_protect: + # Report interval for NGINX App Protect details - the frequency the NGINX Agent checks NGINX App Protect for changes. + report_interval: 15s + # Enable precompiled publication from the NGINX Management Suite (true) or perform compilation on the data plane host (false). + precompiled_publication: true + +nap_monitoring: + # Buffer size for collector. Will contain log lines and parsed log lines + collector_buffer_size: 50000 + # Buffer size for processor. Will contain log lines and parsed log lines + processor_buffer_size: 50000 + # Syslog server IP address the collector will be listening to + syslog_ip: "127.0.0.1" + # Syslog server port the collector will be listening to + syslog_port: 514 +``` + +
    + +
    + +
    +Verify access to the NGINX packages repository + +To allow Instance Manager to automatically download the latest Attack Signatures and Threat Campaigns, you need to [upload the certificate and key files]({{< relref "/nim/nginx-app-protect/setup-waf-config-management.md#upload-nginx-app-protect-waf-certificate-and-key" >}}) included with your subscription to allow access to the package repository. + +If you already uploaded your certificate and key files, use the command below to verify that they allow access to the package repo: + +```bash +curl --key /etc/ssl/nginx/nginx-repo.key --cert /etc/ssl/nginx/nginx-repo.crt https://pkgs.nginx.com/app-protect-security-updates/index.xml +``` + +
    + +The output looks similar to the following example: + +``` text +... + + + + + + app-protect-attack-signatures + x86_64 + + + + + app-protect-attack-signatures + x86_64 + + + +... +``` + +
    + +--- + +## What's Next + +Now that configuration management is set up, you can use the Instance Manager REST API to manage security policies, view system information about your NGINX App Protect WAF instances, and update Attack Signatures and Threat Campaigns. Learn more in [Manage App Protect WAF Configuration using the REST API]({{< relref "manage-waf-security-policies" >}}). diff --git a/content/nim/nginx-app-protect/waf-config-management.md b/content/nim/nginx-app-protect/waf-config-management.md new file mode 100644 index 000000000..6f7543aaa --- /dev/null +++ b/content/nim/nginx-app-protect/waf-config-management.md @@ -0,0 +1,32 @@ +--- +description: Learn how to use NGINX Management Suite Instance Manager to publish NGINX + App Protect WAF configurations. +docs: DOCS-1114 +tags: +- docs +title: WAF Configuration Management +toc: true +weight: 100 +--- + +## Overview + +You can use NGINX Management Suite Instance Manager to publish configurations to your NGINX App Protect WAF data plane instances. + +## Publish WAF Configurations + +1. Set up your NGINX Management Suite Instance Manager instance: + + - [Install the WAF Compiler]({{< relref "/nim/nginx-app-protect/setup-waf-config-management#install-the-waf-compiler" >}}) + + - [Set up the Attack Signatures and Threat Campaigns]({{< relref "/nim/nginx-app-protect/setup-waf-config-management#set-up-attack-signatures-and-threat-campaigns" >}}) + +2. In Instance Manager, [onboard the App Protect Instances]({{< relref "/nim/nginx-app-protect/setup-waf-config-management#onboard-nginx-app-protect-waf-instances" >}}) you want to publish policies and log profiles to. + +3. [Create the security policies]({{< relref "/nim/nginx-app-protect/manage-waf-security-policies#create-security-policy" >}}). + +4. [Create the security log profiles]({{< relref "/nim/nginx-app-protect/manage-waf-security-policies#create-security-log-profile" >}}). + +5. 
[Add or edit a WAF Configuration]({{< relref "/nim/nginx-app-protect/setup-waf-config-management#add-waf-config" >}}) to your NGINX Instances, and publish using Instance Manager. + + {{}}Map the App Protect directives on NGINX configuration to `.tgz` file extensions (not `.json`).{{< /note >}} diff --git a/content/nim/nginx-configs/_index.md b/content/nim/nginx-configs/_index.md new file mode 100644 index 000000000..68a09440c --- /dev/null +++ b/content/nim/nginx-configs/_index.md @@ -0,0 +1,6 @@ +--- +title: NGINX configs +weight: 15 +url: /nginx-instance-manager/nginx-configs/ +weight: 70 +--- \ No newline at end of file diff --git a/content/nim/nginx-configs/config-templates/_index.md b/content/nim/nginx-configs/config-templates/_index.md new file mode 100644 index 000000000..3192ad849 --- /dev/null +++ b/content/nim/nginx-configs/config-templates/_index.md @@ -0,0 +1,5 @@ +--- +title: NGINX config templates +weight: 20 +url: /nginx-instance-manager/nginx-configs/config-templates/ +--- \ No newline at end of file diff --git a/content/nim/nginx-configs/config-templates/concepts/_index.md b/content/nim/nginx-configs/config-templates/concepts/_index.md new file mode 100644 index 000000000..ddcdb5ad0 --- /dev/null +++ b/content/nim/nginx-configs/config-templates/concepts/_index.md @@ -0,0 +1,8 @@ +--- +description: "" +title: Overview +weight: 10 +url: /nginx-instance-manager/nginx-configs/config-templates/concepts +--- + + diff --git a/content/nim/nginx-configs/config-templates/concepts/augment-templates.md b/content/nim/nginx-configs/config-templates/concepts/augment-templates.md new file mode 100644 index 000000000..52a1b1e2a --- /dev/null +++ b/content/nim/nginx-configs/config-templates/concepts/augment-templates.md @@ -0,0 +1,70 @@ +--- +title: "Augment templates" +date: 2024-04-17T14:00:14-07:00 +# Change draft status to false to publish doc +draft: false +# Description +# Add a short description (150 chars) for the doc. Include keywords for SEO. 
+# The description text appears in search results and at the top of the doc. +description: "" +# Assign weights in increments of 100 +weight: 120 +toc: true +tags: [ "docs" ] +# Create a new entry in the Jira DOCS Catalog and add the ticket ID (DOCS-) below +docs: "DOCS-1656" +# Taxonomies +# These are pre-populated with all available terms for your convenience. +# Remove all terms that do not apply. +categories: ["installation", "platform management", "load balancing", "api management", "service mesh", "security", "analytics"] +doctypes: ["concept"] +journeys: ["researching", "getting started", "using", "renewing", "self service"] +personas: ["devops", "netops", "secops", "support"] +versions: [] +authors: [] +--- + +## Overview {#augmenting-global-default-base-template} + +Using augment templates in F5 NGINX Instance Manager, administrators and developers can customize NGINX configurations beyond the foundational settings of the F5 Global Default Base template. You can use the augment template to add specific features like OIDC authentication, or segment (compartmentalize) configuration elements like location and server blocks. Augment templates can be combined with a base template to build upon it or applied directly to an existing NGINX configuration. + +## Understanding augment templates + +Augment templates add specific features or modify existing configurations generated by the base template. They can be categorized into two main types: + +- **Feature augments**: Add specific features such as caching, authentication, or rate limiting to the NGINX configuration. +- **Segment augments**: Modify or add configuration segments, such as additional server blocks, location directives, or upstream definitions. 
+ +## How to use augment templates + +{{}} + For instructions on setting up and deploying config templates, including augment templates, see [Manage NGINX Configs with Templates]({{< relref "/nim/nginx-configs/config-templates/how-to/manage-nginx-configs-with-templates.md" >}}).{{}} + +1. **Identify needs**: Determine the additional functionalities or configuration segments needed beyond the base template. This could include specific NGINX modules, security enhancements, or custom routing requirements. + +2. **Select or create augments**: Choose from existing augment templates provided by NGINX Instance Manager or create custom augment templates to meet your specific needs. Custom augment templates can be developed by defining JSON schemas and corresponding `.tmpl` files, similar to the base template. + +3. **Apply augments**: Use the NGINX Instance Manager interface to apply the selected augment templates to your NGINX configuration. This can be done by specifying the augment templates to be included when [previewing and generating the config]({{< relref "/nim/nginx-configs/config-templates/how-to/manage-nginx-configs-with-templates.md#preview-generate-config" >}}). + +4. **Customize inputs**: Provide any required inputs for the augment templates. This may involve specifying parameters such as paths, server names, or authentication keys, depending on the functionality being added. + +5. **Generate and deploy**: Once the augment templates and their inputs have been defined, generate the final NGINX configuration. Review the generated configuration to ensure it meets your requirements before deploying it to your NGINX instances. + + +## Benefits of using augment templates + +- **Modularity**: Augment templates allow for a modular approach to configuring NGINX, enabling users to mix and match features as needed without altering the base configuration. 
This modularity extends to role-based access control (RBAC), allowing administrators to define specific permissions for who can modify and submit these templates. +- **Flexibility**: By separating specific functionalities into augment templates, configurations can be easily customized and updated to adapt to changing requirements. +- **Simplification**: Augments abstract the complexity of NGINX configuration directives, offering a simplified interface for adding features and making modifications. +- **Reusability**: Augment templates can be reused across different NGINX configurations, promoting consistency and efficiency in managing NGINX instances. + +### Best practices for augmenting configurations + +- **Test augments separately**: Before applying augment templates to production configurations, test them in a separate environment to ensure they function as expected. +- **Documentation**: Document the purpose and inputs of custom augment templates to facilitate understanding and usage by other team members. + +--- + +## Additional Templating Resources + +{{< include "nim/templates/additional-templating-resources.md" >}} \ No newline at end of file diff --git a/content/nim/nginx-configs/config-templates/concepts/config-templates.md b/content/nim/nginx-configs/config-templates/concepts/config-templates.md new file mode 100644 index 000000000..979c2f160 --- /dev/null +++ b/content/nim/nginx-configs/config-templates/concepts/config-templates.md @@ -0,0 +1,85 @@ +--- +title: "About NGINX config templates" +date: 2024-03-11T14:03:20-07:00 +# Change draft status to false to publish doc +draft: false +# Description +# Add a short description (150 chars) for the doc. Include keywords for SEO. +# The description text appears in search results and at the top of the doc. 
+description: "" +# Assign weights in increments of 100 +weight: 100 +toc: true +tags: [ "docs" ] +# Create a new entry in the Jira DOCS Catalog and add the ticket ID (DOCS-) below +docs: "DOCS-1502" +# Taxonomies +# These are pre-populated with all available terms for your convenience. +# Remove all terms that do not apply. +categories: ["installation", "platform management", "load balancing", "api management", "service mesh", "security", "analytics"] +doctypes: ["concept"] +journeys: ["researching", "getting started", "using", "renewing", "self service"] +personas: ["devops", "netops", "secops", "support"] +versions: [] +authors: [] +--- + +## Config templates + +F5 NGINX Instance Manager uses [Go templating](https://pkg.go.dev/text/template) to simplify creating and standardizing NGINX configurations. These config templates create an abstraction layer for NGINX configuration files, enabling users to provide parameters to generate a working configuration without needing a deep knowledge of NGINX syntax. These templates simplify configuring NGINX, enforce best practices for configurations, and enable self-service permissions for app development. + +{{}} + Go templating in Instance Manager includes support for the [Sprig function library](https://masterminds.github.io/sprig/), offering a wide range of additional functions that can be used in templates for advanced operations like string manipulation, data conversion, mathematics, and more. {{}} + +### Types of templates {#template-types} + +Configuration templates come in two types: + +- **Base templates**: A base template is a comprehensive set of instructions used to generate a complete NGINX configuration. It includes all the necessary directives and parameters to create a functional NGINX configuration from scratch. Essentially, it’s the foundational configuration on which your NGINX instance operates. 
+ +- **Augment templates**: Augment templates are designed to add specific features or modify existing configurations generated by the base template. They include: + + - **Feature augments**: These add specific functionalities such as caching, authentication, or rate limiting to the NGINX configuration. + - **Segment augments**: These modify or add configuration segments like additional server blocks, location directives, or upstream definitions. + +### Template resource files {#template-resources} + +Configuration templates include the following components: + +- **Template files (.tmpl)**: Written in Go's templating language, these files define the NGINX configuration's structure and parameters. + +- **JSON schema files (.json)**: These files create the rules for validating user inputs and generate dynamic web forms for data entry. + +- **Auxiliary files**: Additional files required for configuration, such as JavaScript for added functionality, security certificates, or documentation (README.md). These files support the main configuration and provide necessary context or capabilities. + +To learn more about the resources mentioned, refer to the [Template Resource Files]({{< relref "nim/nginx-configs/config-templates/concepts/template-resources.md" >}}) topic. + +## Target {#target} + +A target refers to the specific NGINX server instance, instance group, or staged config where a template (base or augment) is intended to be applied. It's the designated location or context within which the generated configuration will be active and operational. + +There are three types of targets: + +1. **Individual NGINX instance**: Targets a single server, allowing for precise configuration updates or replacements. + +2. **Instance group**: A collection of NGINX instances managed as a single group. Applying a template to an instance group ensures uniform configuration across all its servers. + +3. 
**Staged config**: A staging area for configurations before deployment, allowing for testing and validation to minimize potential disruptions upon live deployment. + +## Template submission {#template-submission} + +Template submission involves applying a set of configurations (derived from base and/or augment templates) to a target. This action takes the parameters defined in the templates, generates the final NGINX configuration, and deploys it to the specified target. RBAC plays a pivotal role here, determining who can submit and modify template submissions. Template submission effectively bridges the gap between configuration design and operational use. + +Key aspects of template submission include: + +- **Snapshots**: Snapshots are created when templates are submitted. Snapshots capture the state of the template and its inputs at the time of submission. This includes all the settings, parameters, and the structure defined in both base and augment templates. By creating a snapshot, NGINX Instance Manager preserves a record of the exact configuration applied to a target at a specific point in time. This is crucial for auditing purposes, rollback scenarios, and understanding the evolution of a server's configuration. + +- **Target application**: When submitting a template, it's important to specify the target accurately. The target is the NGINX instance, instance group, or staged config where the generated configuration will be applied. Misidentifying the target can lead to configurations being deployed to unintended environments, potentially causing disruptions. + +- **Role-based access control (RBAC)**: With RBAC, administrators can limit who can create and modify template submissions based on team roles or individual responsibilities, ensuring only authorized users can change NGINX configurations. 
+ +--- + +## Additional Templating Resources {#additional-resources} + +{{< include "nim/templates/additional-templating-resources.md" >}} diff --git a/content/nim/nginx-configs/config-templates/concepts/default-base-template.md b/content/nim/nginx-configs/config-templates/concepts/default-base-template.md new file mode 100644 index 000000000..b9443a94b --- /dev/null +++ b/content/nim/nginx-configs/config-templates/concepts/default-base-template.md @@ -0,0 +1,54 @@ +--- +title: "F5 Global Default base template" +date: 2024-03-28T14:22:17-07:00 +draft: false +description: "" +weight: 110 +toc: true +tags: [ "docs" ] +docs: "DOCS-1503" +categories: ["installation", "platform management", "load balancing", "api management", "service mesh", "security", "analytics"] +doctypes: ["reference"] +journeys: ["researching", "getting started", "using", "renewing", "self service"] +personas: ["devops", "netops", "secops", "support"] +versions: [] +authors: [] +--- + +## Overview + +The F5 Global Default Base template includes all the essential elements needed to build NGINX configurations using templates. It simplifies complex directives into user-friendly parameters, helping those new to NGINX generate configurations quickly. While **this default template alone will not create a deployable NGINX configuration**, it offers a clear example of how to use modular components and injectable templates for augmenting functionality and the simplified user interface that comes along with that. Using these patterns, you can copy and modify the base template to create custom templates tailored to your specific needs. + +## Key components + +The default base template comprises several core components, each serving a specific role in the configuration process: + +1. **Base template (`base.tmpl`)**: Establishes fundamental settings like user context, worker processes, and logging. 
It includes blocks for **main**, **HTTP**, and **stream** configurations and conditionally loads additional modules based on provided inputs. + +2. **HTTP server inputs (`http-server.json`)**: Defines parameters for HTTP server block configurations, including server names, facilitating the creation of customized server blocks within the HTTP context. + +3. **Location inputs (`location.json`)**: Outlines parameters for configuring location blocks, allowing detailed customization of routing and request handling within server blocks. + +4. **Stream upstream inputs (`stream-upstream.json`)** and **HTTP upstream inputs (`http-upstream.json`)**: Provide schemas for upstream configurations in both HTTP and stream contexts, specifying upstream names and enabling dynamic generation of upstream blocks. + +5. **Module options (`main.json`)**: Offers options for module loading, including conditions to exclude specific modules, ensuring flexibility in module management and error prevention during configuration testing. + +6. **Stream server inputs (`stream-server.json`)**: Describes parameters for stream server configurations, including UDP settings, ports, and proxy pass details, supporting the generation of stream server blocks tailored to specific requirements. + +## Template customization and usage + +The template uses JSON schema files to validate the input and provide a user-friendly interface for customizing NGINX configurations. Each schema corresponds to a different configuration aspect, allowing users to define server, location, and upstream settings without editing NGINX config files directly. + +### Conditional module loading + +The template conditionally loads modules based on user inputs, optimizing the configuration by including only necessary modules. This makes the configuration process more efficient and enhances the performance and security of NGINX instances. 
+ +### Dynamic configuration generation + +The template dynamically generates configuration blocks for HTTP and stream contexts, incorporating server blocks, location directives, upstream configurations, and proxy pass settings based on the provided inputs. This modular and dynamic approach facilitates the rapid deployment of customized NGINX configurations, catering to a wide range of deployment scenarios. + +--- + +## Additional Templating Resources + +{{< include "nim/templates/additional-templating-resources.md" >}} diff --git a/content/nim/nginx-configs/config-templates/concepts/template-resources.md b/content/nim/nginx-configs/config-templates/concepts/template-resources.md new file mode 100644 index 000000000..c966f5e94 --- /dev/null +++ b/content/nim/nginx-configs/config-templates/concepts/template-resources.md @@ -0,0 +1,149 @@ +--- +title: "Template resource files" +date: 2024-03-19T12:23:28-07:00 +# Change draft status to false to publish doc +draft: false +# Description +# Add a short description (150 chars) for the doc. Include keywords for SEO. +# The description text appears in search results and at the top of the doc. +description: "" +# Assign weights in increments of 100 +weight: 200 +toc: true +tags: [ "docs" ] +# Create a new entry in the Jira DOCS Catalog and add the ticket ID (DOCS-) below +docs: "DOCS-1501" +# Taxonomies +# These are pre-populated with all available terms for your convenience. +# Remove all terms that do not apply. 
+categories: ["installation", "platform management", "load balancing", "api management", "service mesh", "security", "analytics"] +doctypes: ["reference"] +journeys: ["researching", "getting started", "using", "renewing", "self service"] +personas: ["devops", "netops", "secops", "support"] +versions: [] +authors: [] +--- + +## Config Template, Schema, and README Files + +F5 NGINX Instance Manager uses [Go templating](https://pkg.go.dev/text/template) and JSON schemas to create flexible and robust NGINX configuration templates. This allows users to efficiently customize and validate configurations without needing expert knowledge of NGINX syntax. + +{{}} + Go templating in Instance Manager includes support for the [Sprig function library](https://masterminds.github.io/sprig/), offering a wide range of additional functions that can be used in templates for advanced operations like string manipulation, data conversion, mathematics, and more. {{}} + +This guide covers the following resource files for creating templates: + +- **Config files** (`*.tmpl`), which are Go templates that define the NGINX configuration's structure and parameters. +- **Schema files** (`*.json`), which are JSON schemas specifying the rules for input validation and providing the structure for dynamic web forms. +- **README.md** files, which document the template's purpose, usage, and capabilities. + +If you're creating templates from scratch, the following table lists the acceptable template and schema filenames to use: + +{{}} +| Config File | Applicable Type(s) | Schema File(s) | Purpose | +|------------------------|--------------------|---------------------------------------------------|----------------------------------------------------------------------------------------------------------------------| +| **base.tmpl** | base | **main.json**
    **http.json**
    **http-server.json**
    **http-upstream.json**
    **location.json**
    **stream.json**
    **stream-server.json**
    **stream-upstream.json** |

    Required for templates designated as base. The schema files are optional for templates that don't require user inputs.

    The template file should include all directives needed to create a complete NGINX configuration, such as **main**, **http**, and **stream**. Also, it should have specific Go templating language commands to insert dynamic configuration details into the right sections or directive blocks.

    Example for **main** directive block:
    `{{ $input.ExecTemplateAugments "main" }}`

    +| **main.tmpl** | augment | **main.json** | Contains configuration and schema inputs for the **main** directive block. The schema file is optional for templates that don't require user inputs. See the full [alphabetical list of directives](https://nginx.org/en/docs/dirindex.html) | +| **http.tmpl** | augment | **http.json** | Contains configuration and schema inputs for the [HTTP directive](https://nginx.org/en/docs/http/ngx_http_core_module.html#http) block. The schema file is optional for templates that don't require user inputs.| +| **http-server.tmpl** | augment | **http-server.json** | Contains configuration and schema inputs for the [HTTP server directive](https://nginx.org/en/docs/http/ngx_http_core_module.html#server) block. The schema file is optional for templates that don't require user inputs. | +| **http-upstream.tmpl** | augment | **http-upstream.json** | Contains configuration and schema inputs for the [HTTP upstream directive](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) block. The schema file is optional for templates that don't require user inputs. | +| **location.tmpl** | augment | **location.json** | Contains configuration and schema inputs for the [HTTP server location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) directive block. The schema file is optional for templates that don't require user inputs. | +| **stream.tmpl** | augment | **stream.json** | Contains configuration and schema inputs for the [stream directive](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) block. The schema file is optional for templates that don't require user inputs. | +| **stream-server.tmpl** | augment | **stream-server.json** | Contains configuration and schema inputs for the [stream server directive](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#server) block. The schema file is optional for templates that don't require user inputs. 
| +| **stream-upstream.tmpl** | augment | **stream-upstream.json** | Contains configuration and schema inputs for the [stream upstream directive](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#upstream) block. The schema file is optional for templates that don't require user inputs. | +| **README.md** | base, augment | n/a | Provides documentation, usage instructions, and an overview of the template. | +{{
    }} + +
    + +## Example: Enable OIDC with main.tmpl + +This example shows how to enable OIDC authentication in NGINX using a config template and user input. This templated approach with JSON schema validation simplifies updating the NGINX configuration and reduces the risk of errors. + +- **main.tmpl** + + A template file, **main.tmpl**, checks if OIDC (OpenID Connect) authentication should be enabled in the NGINX configuration based on user input. If the input enables OIDC, the template instructs NGINX to load the **ngx_http_js_module.so** module necessary for OIDC functionality. + + ``` go + {{$input := .}} + {{$baseData := .Data.V1}} + {{$oidcEnabled := false}} + + {{if $baseData.main}} + {{if $baseData.main.templateInput}} + {{$oidc := index $baseData.main.templateInput "oidc"}} + {{if $oidc}} + {{$oidcEnabled = index $oidc "enabled"}} + {{end}} + {{end}} + + {{if eq $oidcEnabled true}} + load_module modules/ngx_http_js_module.so; + {{end}} + {{end}} + ``` + +- **main.json** + + A JSON schema file, **main.json**, validates user inputs for **main.tmpl**, ensuring that the **oidc** object's **enabled** property is correctly defined and used. The **templateInput** root property is required and will be validated when analyzing the template. + + ``` json + { + "$schema": "https://json-schema.org/draft/2020-12/schema", + "type": "object", + "title": "Main inputs", + "properties": { + "templateInput": { + "type": [ + "object", + "null" + ], + "properties": { + "oidc": { + "type": "object", + "title": "OIDC Setting", + "description": "Adds OIDC settings to main, http, http-server and location blocks. 
Use OIDC to enable single sign-on (SSO) to authenticate to your app.", + "__docs": [ + { + "title": "OIDC specification", + "link": "https://openid.net/specs/openid-connect-core-1_0.html" + } + ], + "properties": { + "enabled": { + "title": "OIDC authentication enabled", + "type": "boolean", + "description": "Set this value to be true, if location(s) can be accessed only after authentication by the OIDC provider configured at the gateway. If no authentication is needed set this value to be false.", + "examples": [ + true + ] + } + }, + "required": [ + "enabled" + ] + } + } + } + }, + "required": [] + } + ``` + +
    + + An explanation of the JSON schema settings: + + - **Root property for `templateInput`**: The JSON schema defines a root property named `templateInput`. This property is necessary because it contains all the customizable parameters the user can provide for the template. The schema specifies that `templateInput` can be either an object (which holds configuration parameters) or null (indicating the absence of such parameters). + + - **OIDC object validation**: Within `templateInput`, there's a specific object named `oidc` meant to configure OIDC-related settings. The schema for the `oidc` object includes a boolean property named `enabled`. This property controls whether OIDC authentication is turned on (`true`) or off (`false`) for the NGINX configuration being generated. + + - **Input-validation for `oidc` object**: The `main.json` provides rules for validating the `oidc` object's inputs. For example, the `enabled` property within the `oidc` object must be a boolean. This ensures that the template receives correctly typed and structured data to generate the configuration correctly. + + - **Required properties**: The schema declares that within the `oidc` object, the `enabled` property is mandatory (`"required": ["enabled"]`). This means that any input provided for the `oidc` object must include a clear true/false value for `enabled`. 
+ +--- + +## Additional Templating Resources + +{{< include "nim/templates/additional-templating-resources.md" >}} diff --git a/content/nim/nginx-configs/config-templates/how-to/_index.md b/content/nim/nginx-configs/config-templates/how-to/_index.md new file mode 100644 index 000000000..4ac58751b --- /dev/null +++ b/content/nim/nginx-configs/config-templates/how-to/_index.md @@ -0,0 +1,8 @@ +--- +description: "" +title: How To +weight: 20 +url: /nginx-instance-manager/nginx-configs/config-templates/how-to/ +--- + + diff --git a/content/nim/nginx-configs/config-templates/how-to/manage-nginx-configs-with-templates.md b/content/nim/nginx-configs/config-templates/how-to/manage-nginx-configs-with-templates.md new file mode 100644 index 000000000..12dec5a1e --- /dev/null +++ b/content/nim/nginx-configs/config-templates/how-to/manage-nginx-configs-with-templates.md @@ -0,0 +1,181 @@ +--- +title: "Manage NGINX configs with templates" +date: 2024-03-12T15:51:04-07:00 +# Change draft status to false to publish doc. +draft: false +# Description +# Add a short description (150 chars) for the doc. Include keywords for SEO. +# The description text appears in search results and at the top of the doc. +description: "" +# Assign weights in increments of 100 +weight: 200 +toc: true +tags: [ "docs" ] +# Create a new entry in the Jira DOCS Catalog and add the ticket ID (DOCS-) below +docs: "DOCS-1506" +# Taxonomies +# These are pre-populated with all available terms for your convenience. +# Remove all terms that do not apply. 
+categories: ["installation", "platform management", "load balancing", "api management", "service mesh", "security", "analytics"] +doctypes: ["task"] +journeys: ["researching", "getting started", "using", "renewing", "self service"] +personas: ["devops", "netops", "secops", "support"] +versions: [] +authors: [] + +--- + +## Create a Config Template + +Config templates in F5 NGINX Instance Manager allow you to predefine and standardize configurations for your NGINX instances. By creating a template, you can streamline the deployment process and ensure consistency across your environments. This guide walks you through creating a new template from scratch or importing an existing template from an archive, generating and deploying a configuration to a designated target, and modifying an existing template submission. + +### Import Config Templates from an Archive + +When importing a config template from an archive, ensure your `.tar.gz` file matches to the following structure: + +- Each template must be in its own directory within the archive. +- Directories should contain all related template files (.tmpl for templates, .json for JSON schemas). +- Directories should have a `meta.json` file that includes metadata such as the template's name, description, author, type, and unique identifier (UID). + +#### Required archive structure + +``` text +.tar.gz +│ +├── / +│ ├── .tmpl +│ ├── .json +│ └── meta.json +│ +├── / +│ ├── .tmpl +│ ├── .json +│ └── meta.json +│ +└── ... +``` + +#### Example meta.json file + +``` json +{ + "meta_version_num": 1, + "name": "example_template", + "description": "This is an example NGINX template.", + "author": "Your Name", + "type": "base", // Could be "base" or "augment" + "uid": "unique-identifier-uuid-goes-here" +} +``` + +_Replace the placeholder values with the actual details of your template._ + +
    + +To import an existing template from a `.tar.gz` archive file: + +1. Open your web browser, go to the Fully Qualified Domain Name (FQDN) of your NGINX Management Suite host, and log in. +2. From the Launchpad menu, choose **Instance Manager**. +3. In the left navigation pane, select **Templates > Overview**. +4. On the Config Templates "Overview" page, select **Create**. +5. In the **Create Template** dialog, select **Import**. +6. Drag and drop your `.tar.gz` archive file onto the web form or select **Browse** to find and select your file. +7. Once the file is uploaded, select **Parse** to inspect the archive. +8. If an error message appears indicating the archive is unsigned, and you recognize and trust the source of the file, select the checkbox **Allow Signature Bypass**. +9. Select **Import** to finish importing the templates. + +{{}} Make sure you validate the source of the archive before bypassing the signature requirement to maintain the security of your system.{{}} + +### Create a Config Template from Scratch + +To create a new config template: + +1. Open your web browser, go to the Fully Qualified Domain Name (FQDN) of your NGINX Management Suite host, and log in. +2. From the Launchpad menu, choose **Instance Manager**. +3. In the left navigation pane, select **Templates**. +4. On the Config Templates "Overview" page, select **Create**. +5. In the **Create Template** dialog: + - Select **New** to start a fresh template. + - Enter a unique and descriptive name for your template in the **Name** field. + - (Optional) Provide a description in the **Description** field to give more context about the template's purpose or usage. + - Choose the template's **State** to indicate its readiness: + - **Draft**: Indicates that the template is still under development, editable, and not finalized for use. + - **Ready for Use**: Means the template is finalized, locked from further editing, and ready to be applied or submitted. 
+ - Specify the **Type** of template you are creating: + - **Base**: Select this option if the template will serve as a comprehensive starting point, containing all the directives needed for a standalone NGINX configuration. + - **Augment**: Choose this if the template will add to or enhance an existing configuration by introducing additional directives or settings. +6. Click **Submit** to create the template. + +After creating a template, you'll need to add [resource files]({{< relref "/nim/nginx-configs/config-templates/concepts/template-resources.md" >}}) to define its structure and behavior: + +- **Config File (.tmpl)**: This is the core template file that defines the structure and parameters of your NGINX configuration. If you're creating a base template, you'll add a **base.tmpl** file. For augment templates, select the appropriate augment file that corresponds to the functionality you wish to include. +- **JSON Schema File (.json)**: JSON schema files are necessary for creating the dynamic web forms in the UI that users will interact with. These schemas define the valid format and type of user inputs for the template. +- **Aux File**: Aux files are additional resources necessary for the NGINX configuration to function properly. They may include JavaScript files, SSL certificates, MIME type definitions, and any other required configuration assets. +- **Docs (README.md)**: Documentation files like **README.md** provide essential information, usage instructions, and a high-level description of what the template does and how to use it. + +To add resource files to a template: + +1. On the **Templates > Overview** page, locate the newly created template. In the **Actions** column, select the ellipsis (three dots), then choose **Edit Template Files**. +2. In the config editor, select **Add file**. +3. Choose the type of file you want to add to the template. Depending on the file type, you'll be presented with a list of file options to choose from. +4. 
Select the file names you want to add to your template. +5. After selecting all the necessary files, click **Add** to include them in the template. +6. The selected files will now appear in the template's directory structure on the left side of the editor. Select a file to edit its contents in the editing pane. +7. Make your changes and select **Save** to update the template with your configurations. + +{{}} +You can find more information about template types, template resource files, and JSON schema features in the [Additional Templating Resources](#additional-templating-resources) section.{{}} + +--- + +## Preview, Generate, and Submit a Config from a Template {#preview-generate-config} + +Previewing a config from a template lets you see how your NGINX configurations will look before publishing them. During this step, you'll make sure all the inputs and augmentations are correct. Once you're satisfied with the preview, you can generate the config and submit it. + +To preview, generate, and submit a config from a template: + +1. Open your web browser, go to the Fully Qualified Domain Name (FQDN) of your NGINX Management Suite host, and log in. +2. Select **Instance Manager** from the LaunchPad. +3. On the left sidebar, select **Templates**. +4. Locate the desired template on the "Overview" page and select the ellipsis (three dots) in the **Actions** column, then select **Preview and Generate**. +5. Complete the forms on the **Preview and Generate Config** dialog in sequence, selecting **Next** to move forward: + - **Choose Publish Options**: Specify where to publish the template by selecting either: + - **Publish to an Instance**: To apply the configuration to a single NGINX instance. + - **Publish to an Instance Group**: To apply the configuration to multiple instances managed as an instance group. + - **Save to a Staged Config**: To stage the configuration for future deployment. + - **Save as a New Staged Config**: To create a brand new staged configuration for later use. 
+ - **Augments** (Optional): Include any augment templates needed to enhance a base configuration with additional features. + - **Base and Augment Inputs**: Enter the required configuration inputs for the chosen templates. + - **Preview Config**: Use the filename dropdown to review the output for each configuration. +6. After verifying the configurations, select **Publish**. If you've published to an instance or instance group, the template submission will be tracked on the **Template Submissions** page. +7. Once the submission is accepted and confirmed, select **Close and Exit**. + +--- + +## Editing a Template Submission + +{{}} +When you edit a template submission, it is important to note that the current NGINX configuration, alongside all previous submissions and inputs for your target, will be replaced. This ensures that your NGINX instances always run the most up-to-date configurations derived from the latest inputs. +{{}} + +When managing your NGINX configurations, you might find that certain parameters need updating to keep up with changing requirements. Rather than creating a new template submission for every minor adjustment, you have the flexibility to edit the latest template submission. This process allows you to modify the existing inputs to the preferred settings, providing a streamlined approach to configuration management. + +To edit a template submission: + +1. Open your web browser, go to the Fully Qualified Domain Name (FQDN) of your NGINX Management Suite host, and log in. +2. Select **Instance Manager** from the LaunchPad. +3. On the left sidebar, select **Template Submissions** to view a list of all template submissions. +4. Locate and select the template submission you want to edit. +5. Select the tab that corresponds to the type of target for your template submission: **Instances**, **Instance Groups**, or **Staged Configs**. +6. Find the template target you intend to edit. 
Select the ellipsis (three dots) in the **Actions** column next to the target, then select **Edit Submission**. +7. Complete the forms on the **Preview and Generate Config** dialog in sequence, selecting **Next** to move forward. As you proceed, update any existing inputs to the new settings you wish to apply. +8. When you reach the **View Changes** section, you have the opportunity to compare the previous submission against the new one. Use the **Diff Mode** option to view differences either side-by-side or inline. +9. After you verify the configurations are correct, select **Publish** to update the template submission with your changes and publish the config. +10. Once the updated submission is successfully accepted and confirmed, select **Close and Exit** to complete the editing process. + +--- + +## Additional Templating Resources + +{{< include "nim/templates/additional-templating-resources.md" >}} + diff --git a/content/nim/nginx-configs/config-templates/how-to/rbac-config-templates-and-submissions.md b/content/nim/nginx-configs/config-templates/how-to/rbac-config-templates-and-submissions.md new file mode 100644 index 000000000..55c886792 --- /dev/null +++ b/content/nim/nginx-configs/config-templates/how-to/rbac-config-templates-and-submissions.md @@ -0,0 +1,64 @@ +--- +title: "RBAC for config templates and submissions" +date: 2024-03-29T09:35:06-07:00 +# Change draft status to false to publish doc +draft: false +# Description +# Add a short description (150 chars) for the doc. Include keywords for SEO. +# The description text appears in search results and at the top of the doc. +description: "" +# Assign weights in increments of 100 +weight: 300 +toc: true +tags: [ "docs" ] +# Create a new entry in the Jira DOCS Catalog and add the ticket ID (DOCS-) below +docs: "DOCS-1505" +# Taxonomies +# These are pre-populated with all available terms for your convenience. +# Remove all terms that do not apply. 
+categories: ["installation", "platform management", "load balancing", "api management", "service mesh", "security", "analytics"] +doctypes: ["tutorial"] +journeys: ["researching", "getting started", "using", "renewing", "self service"] +personas: ["devops", "netops", "secops", "support"] +versions: [] +authors: [] +--- + +## Overview + +With role-based access control (RBAC), administrators can determine who can create, read, update, and delete templates and template submissions. This access control helps you securely manage your NGINX configurations and deployments. + +## Before You Begin + +- Ensure the user is added as a [basic authentication]({{< relref "/nim/admin-guide/authentication/basic-auth/set-up-basic-authentication.md" >}}) or [OpenID Connect (OIDC)]({{< relref "/nim/admin-guide/authentication/oidc/getting-started.md" >}}) user in F5 NGINX Instance Manager. +- Familiarize yourself with the concepts of [Roles]({{< relref "/nim/admin-guide/rbac/assign-roles.md" >}}) and [Permissions]({{< relref "/nim/admin-guide/rbac/create-roles.md" >}}) within Instance Manager as they are crucial for managing access. + +## Assign Roles for Templates and Template Submissions + +To complete these steps, you need administrator access. + +1. Open your web browser, go to the Fully Qualified Domain Name (FQDN) of your NGINX Management Suite host, and log in. + +2. Select **Settings** (gear icon) in the upper-right corner of the dashboard. + +3. Select **Users** from the left menu to manage individual users or **User Groups** for managing access at a group level. + +4. Define access to templates and template submissions: + - To ensure proper management of NGINX configurations, [create or edit roles]({{< relref "/nim/admin-guide/rbac/create-roles.md" >}}) to specify access to templates and template submissions. This may involve defining CRUD permissions specific to managing NGINX configurations. 
+ + - **Restricting access to templates** is essential for controlling who can create and modify templates. This responsibility generally falls to administrators with a comprehensive understanding of NGINX configurations. + + - **Restricting access to template submissions** limits who can change submitted template inputs. This role could be assigned to application development teams, allowing them to deploy and manage their apps themselves. + +5. Assign or modify roles: + - For individual users, select a user from the list and click **Edit User**. For user groups, select a group and click **Edit**. + - In the **Roles** list, select the roles you want to assign to the user or user group. Ensure that the roles include permissions for managing templates and/or template submissions. + +6. After assigning roles and permissions, select **Save** to apply the changes. + +--- + +## Additional Templating Resources + +{{< include "nim/templates/additional-templating-resources.md" >}} + diff --git a/content/nim/nginx-configs/config-templates/reference/_index.md b/content/nim/nginx-configs/config-templates/reference/_index.md new file mode 100644 index 000000000..d41c0cf22 --- /dev/null +++ b/content/nim/nginx-configs/config-templates/reference/_index.md @@ -0,0 +1,8 @@ +--- +description: "" +title: Reference +weight: 40 +url: /nginx-instance-manager/nginx-configs/config-templates/reference/ +--- + + diff --git a/content/nim/nginx-configs/config-templates/reference/json-schema-reference.md b/content/nim/nginx-configs/config-templates/reference/json-schema-reference.md new file mode 100644 index 000000000..c22321f24 --- /dev/null +++ b/content/nim/nginx-configs/config-templates/reference/json-schema-reference.md @@ -0,0 +1,257 @@ +--- +title: "JSON Schemas for Dynamic Web Forms" +date: 2024-03-20T09:07:22-07:00 +# Change draft status to false to publish doc +draft: false +# Description +# Add a short description (150 chars) for the doc. Include keywords for SEO. 
+# The description text appears in search results and at the top of the doc. +description: "" +# Assign weights in increments of 100 +weight: 300 +toc: true +tags: [ "docs" ] +# Create a new entry in the Jira DOCS Catalog and add the ticket ID (DOCS-) below +docs: "DOCS-1504" +# Taxonomies +# These are pre-populated with all available terms for your convenience. +# Remove all terms that do not apply. +categories: ["installation", "platform management", "load balancing", "api management", "service mesh", "security", "analytics"] +doctypes: ["reference"] +journeys: ["researching", "getting started", "using", "renewing", "self service"] +personas: ["devops", "netops", "secops", "support"] +versions: [] +authors: [] +--- + +## Overview + +The F5 NGINX Instance Manager’s web form builder uses JSON schemas to guide and validate user inputs when creating NGINX configurations from templates. This structured input method simplifies the configuration process and ensures adherence to NGINX configuration requirements. + +## JSON Schema + +JSON Schema is a standard explained in detail at [json-schema.org](https://json-schema.org/). + +In NGINX Instance Manager, we support a specific set of common features from the JSON Schema: + +- **Type**: This defines the kind of data a field can hold. More information is available at [Type Definitions](https://json-schema.org/understanding-json-schema/reference/type.html). + + Supported types include: + - [Boolean](#boolean-field): True or false values. + - [String](#string-field): Text. + - [Numeric](#numeric-field): Numbers, both whole and decimal. + - [Object](#object-field): Structures with named properties. + - [Array](#array-field): Lists of items. + +- **Title**: A descriptive, user-friendly name for each form field that lets users know what's required. For further details, refer to [Generic Keywords](https://json-schema.org/understanding-json-schema/reference/generic.html). 
+ +- **Description**: Text to help guide users on what to enter in the form fields. For details, see [Generic Keywords](https://json-schema.org/understanding-json-schema/reference/generic.html). + +- **Examples**: Examples show what valid data looks like so anyone filling out the form knows what to enter. You should give at least one example so users understand the schema's purpose and requirements. More information can be found at [Generic Keywords](https://json-schema.org/understanding-json-schema/reference/generic.html). + +{{< call-out "tip" "Writing effective JSON schema titles and descriptions" >}} The **title** and **description** fields are a key part of the user experience for templates. We recommend making sure that your title and description fields are predictably formatted and provide clear, concise guidance to the user.{{}} + +
    + +**Example http-server.json** + +``` json +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "HTTP Server Inputs", + "type": "object", + "properties": { + "templateInput": { + "type": [ + "object", + "null" + ], + "properties": { + "serverName": { + "title": "Server name", + "type": "string", + "description": "Specifies the HTTP server name.", + "examples": [ + "foo.com" + ] + }, + "id": { + "title": "Server ID", + "type": "string", + "description": "Case-sensitive, alphanumeric ID used for specifying augment placement.", + "examples": [ + "main_server" + ] + } + }, + "required": ["serverName", "id"] + } + }, + "required": [] +} +``` + +
    + +## Type-Specific Features + +Each data type in a JSON schema comes with its own set of features that define how the data can be structured and validated. Understanding these features is important for building robust and user-friendly web forms in NGINX Instance Manager. This section explains the characteristics and functionality of each data type so you can use the schema's capabilities for accurate data representation and validation. + +### Boolean field + +- **Supported**: Basic support without specific features detailed as of now. +- **Usage**: Toggles or binary choices in configurations. +- **Full spec**: https://json-schema.org/understanding-json-schema/reference/boolean.html +- **Example**: + + ```json + { + "isDevelopment": { + "type": "boolean" + } + } + ``` + +### String field + +- **Supported Features**: Regular expression patterns, minimum and maximum length constraints, placeholders through examples, and enum for restricted value sets. +- **Usage**: Text inputs with validation criteria. +- **Full spec**: https://json-schema.org/understanding-json-schema/reference/string.html + +#### Examples + +- Regular string field with pattern and length constraints: + + ```json + { + "test": { + "type": "string", + "pattern": "^[A-Za-z0-9\\s]*$", + "minLength": 1, + "maxLength": 140, + "examples": ["value 1", "value 2"] + } + } + ``` + +- String Select field with enumerated values: + + ```json + { + "method": { + "type": "string", + "enum": ["GET", "POST", "PUT", "DELETE"] + } + } + ``` + +- String MultiSelect field within an array: + + ```json + { + "method": { + "type": "array", + "items": { + "type": "string", + "enum": ["GET", "POST", "PUT", "DELETE"] + } + } + } + ``` + +### Numeric field + +- **Supported Features**: Multiple validation constraints including `multipleOf`, range specifications through `minimum` and `maximum`, and enum for restricted numerical values. +- **Usage**: Numeric inputs within specified ranges or fixed values. 
+- **Full spec**: https://json-schema.org/understanding-json-schema/reference/numeric.html + +#### Examples + +- Regular numeric field with validation criteria: + + ```json + { + "proxyTimeout": { + "type": "number", + "multipleOf": 10, + "minimum": 10, + "maximum": 1000, + "examples": [50, 120, 870] + } + } + ``` + +- Numeric Select field for predefined numerical values: + + ```json + { + "proxyTimeout": { + "type": "number", + "enum": [10, 20, 30, 40, 50] + } + } + ``` + +- Numeric MultiSelect field within an array: + + ```json + { + "proxyTimeout": { + "type": "array", + "items": { + "type": "number", + "enum": [10, 20, 30, 40, 50] + } + } + } + ``` + +### Object field + +- **Supported Features**: Nested object structures with required fields. +- **Usage**: Complex configurations involving nested parameters. +- **Full spec**: https://json-schema.org/understanding-json-schema/reference/object.html +- **Example**: + + ```json + { + "server": { + "type": "object", + "properties": { + "isBackup": { "type": "boolean" }, + "weight": { "type": "integer" }, + "service": { "type": "string" } + }, + "required": ["service"] + } + } + ``` + +### Array field + +- **Supported Features**: Minimum and maximum item counts, uniqueness constraints. +- **Usage**: Lists or collections of configuration items. 
+- **Full spec**: https://json-schema.org/understanding-json-schema/reference/array.html +- **Example**: + + ```json + { + "headers": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { "type": "string" }, + "value": { "type": "string" } + }, + "required": [] + } + } + } + ``` + +--- + +## Additional Templating Resources + +{{< include "nim/templates/additional-templating-resources.md" >}} diff --git a/content/nim/nginx-configs/config-templates/tutorials/_index.md b/content/nim/nginx-configs/config-templates/tutorials/_index.md new file mode 100644 index 000000000..856c6c691 --- /dev/null +++ b/content/nim/nginx-configs/config-templates/tutorials/_index.md @@ -0,0 +1,8 @@ +--- +description: "" +title: Tutorials +weight: 30 +url: /nginx-instance-manager/nginx-configs/config-templates/tutorials +--- + + diff --git a/content/nim/nginx-configs/config-templates/tutorials/round-robin-reverse-proxy.md b/content/nim/nginx-configs/config-templates/tutorials/round-robin-reverse-proxy.md new file mode 100644 index 000000000..bd5d725b5 --- /dev/null +++ b/content/nim/nginx-configs/config-templates/tutorials/round-robin-reverse-proxy.md @@ -0,0 +1,563 @@ +--- +title: "Round-robin reverse proxy with an augment template" +date: 2024-03-12T16:01:58-07:00 +draft: false +description: "Learn how to set up a round-robin reverse proxy using NGINX Instance Manager with base and augment templates." +weight: 100 +toc: true +tags: [ "docs" ] +docs: "DOCS-1655" +categories: ["installation", "platform management", "load balancing", "api management", "service mesh", "security", "analytics"] +doctypes: ["tutorial"] +journeys: ["getting started", "using"] +versions: [] +authors: [] +--- + +## Overview + +This tutorial guides you through setting up a round-robin reverse proxy using base and augment templates in NGINX Instance Manager. It's intended for network administrators and developers familiar with basic NGINX configurations. 
+ +Using templates, especially augment templates, provides significant advantages. Augment templates allow you to modify and extend configurations without altering the base template, making it easier to manage and update settings. This approach enhances modularity, enabling specific teams to control parts of the configuration while maintaining overall system integrity. For instance, using an augment template for round-robin reverse proxy settings allows you to efficiently distribute incoming traffic across multiple servers, enhancing load balancing and reliability. [Role-Based Access Control (RBAC) for templates and template submissions]({{< relref "/nim/nginx-configs/config-templates/how-to/rbac-config-templates-and-submissions.md" >}}) ensures that only authorized users can make changes, promoting security and efficiency. This means that while administrators can manage the base configuration, development teams can independently manage specific proxy settings, improving collaboration and streamlining operations. + +By the end, you'll be able to: + +- Create and configure a base template. +- Create and configure an augment template to extend the functionality of the base template. +- Deploy these templates to manage traffic efficiently within your NGINX environment. + +--- + +## Background + +F5 NGINX Instance Manager simplifies the management of NGINX configurations across a wide network. Using templates, administrators can ensure consistent configurations while easily customizing individual settings with augment templates. + +--- + +## Before you start + +Before you start the tutorial, you should: + +- [Install NGINX Instance Manager 2.16 or later]({{< relref "/nim/deploy/" >}}). If you're using an earlier version, you'll need to upgrade to access the features needed for this tutorial. +- Have administrative access to NGINX Instance Manager. +- Understand basic concepts of web servers and reverse proxies. 
+- Have basic knowledge of [Go templates](https://pkg.go.dev/text/template), [JSON schema](https://json-schema.org), and the [NGINX configuration syntax](https://nginx.org/en/docs/beginners_guide.html). + +--- + +## Create the base template + +In this section, you'll learn how to create a [base config template]({{< relref "nim/nginx-configs/config-templates/concepts/config-templates.md" >}}). + +1. Open your web browser, go to the Fully Qualified Domain Name (FQDN) of your NGINX Management Suite host, and log in. +2. From the Launchpad menu, choose **Instance Manager**. +3. On the left menu, select **Templates**. +4. Select **Create**. +5. Select **New**. +6. Give the template a name. For this tutorial, we'll call the base template **rr_base_template**. +7. Optionally, provide a description for the template. (For example, **Round-Robin Base Template**) +8. Select **Draft** for the template status. +9. Select **Base** for the template type. +10. Select **Submit**. + +### Add the base template files + +Here, you'll add the necessary Go template and JSON schema files to your base template. + +1. On the **Template > Overview** page, select **rr_base_template**. +2. Add the config template file: + - Select **Add File**. + - Select **Config File**, then select **base.tmpl**. + - Select **Add**. +3. Add the schema files: + - Select **Add File**. + - Select **Schema File**, then select each of the following: **http-server.json**, **http-upstream.json**, and **location.json**. + - Select **Add**. + +Your base template should now include the following files: + +{{List of template files including base.tmpl, http-server.json, http-upstream.json, and location.json}} + +### Add the base.tmpl file details {#base-tmpl} + +This snippet defines the structure of the final NGINX configuration file. It uses [Go's text/template](https://pkg.go.dev/text/template) module to dynamically add input into the NGINX configuration. 
You can identify where augment templates will be inserted by looking for lines like: + +{{}} +``` go +{{ $input.ExecTemplateAugments "main" }} +``` +{{}} + +The word "main" can be replaced with other augment injection points, such as "http-server" or "http-upstream." + +1. In the template file list, select **base.tmpl**. +2. Copy and paste the following Go template into the **base.tmpl** file editor. +3. Select **Save** (disk icon). + +``` go +{{/* + A simple base template that provides augment injection points for + the main context, the http context, upstreams, servers, and locations. +*/}} +{{ $input := .}} +{{ $baseData := .Data.V1}} + +{{/* Inject augments targeting the main context here */}} +{{ $input.ExecTemplateAugments "main" }} +events { + worker_connections 1024; +} + +http { + {{/* Inject augments targeting the http context here */}} + {{ $input.ExecTemplateAugments "http" }} + {{ range $upstreamIndex, $upstream := $baseData.http.upstreams }} + upstream {{$upstream.templateInput.nameInConfig}} { + {{$upstreamTemplateInput := $upstream.templateInput}} + {{range $serverIndex, $upstreamServer := $upstreamTemplateInput.servers}} + {{$port := ""}} + {{if $upstreamServer.port}} + {{$port = (printf ":%0.f" $upstreamServer.port)}} + {{end}} + server {{$upstreamServer.address}}{{$port}}; + {{end}} + + {{/* Inject augments targeting this $upstream here */}} + {{ $input.ExecTemplateAugments "http-upstream" $upstream }} + } + {{end}} + + {{ range $serverIndex, $server := $baseData.http.servers}} + server { + server_name {{$server.templateInput.serverNameInConfig}}; + {{ range $locationIndex, $location := $server.locations}} + location {{$location.templateInput.locationMatchExpression}} { + {{/* Inject augments targeting this $location here */}} + {{ $input.ExecTemplateAugments "location" $location $server }} + } + {{end}} + + {{/* Inject augments targeting this $server here */}} + {{ $input.ExecTemplateAugments "http-server" $server }} + } + {{end}} +} +``` + +### 
Add the http-server.json file details + +This snippet uses the [JSON Schema](https://json-schema.org/) specification to define fields for generating a user interface and serving as the data structure for inputs injected into the template. + +To understand how this template connects to the [base template you just added](#base-tmpl), look for the `serverNameInConfig` reference in the code. This reference links the JSON schema fields to the configuration settings defined in the base template. + +1. Select **http-server.json**. +2. Copy and paste the following JSON schema into the **http-server.json** file editor. +3. Select **Save** (disk icon). + +```json +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "HTTP Servers", + "type": "object", + "properties": { + "templateInput": { + "type": [ + "object", + "null" + ], + "properties": { + "serverName": { + "title": "Server Label", + "type": "string", + "description": "Enter a unique label for the server. This label is displayed in the user interface and acts as a key for matching and applying augment input to the specific server.", + "examples": [ + "Example Server" + ] + }, + "serverNameInConfig": { + "title": "Name", + "type": "string", + "description": "The name of the HTTP server. This value is used when matching incoming requests to the correct server.", + "examples": [ + "example.com" + ] + } + }, + "required": [ + "serverName", + "serverNameInConfig" + ] + } + }, + "required": [] +} +``` + +### Add the http-upstream.json file details + +1. Select **http-upstream.json**. +2. Copy and paste the following JSON schema into the **http-upstream.json** file editor. +3. Select **Save** (disk icon). 
+ +```json +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "HTTP Upstreams", + "type": "object", + "properties": { + "templateInput": { + "type": [ + "object", + "null" + ], + "properties": { + "name": { + "title": "Upstream Label", + "type": "string", + "description": "Enter a unique label for the upstream. This label is displayed in the user interface and acts as a key for matching and applying augment input to the specific upstream.", + "examples": [ + "Users Service" + ] + }, + "nameInConfig": { + "title": "Name", + "type": "string", + + "examples": [ + "users_backend" + ] + }, + "servers": { + "type": "array", + "title": "Upstream Servers", + "items": { + "type": "object", + "properties": { + "address": { + "title": "Upstream server address", + "type": "string", + "description": "Specifies the address for the upstream server.", + "examples": [ + "users1.example.com", + "192.0.0.1" + ] + }, + "port": { + "type": "integer", + "title": "Port", + "description": "Specifies the port for the upstream server.", + "minimum": 1, + "maximum": 65535, + "examples": [ + 80 + ] + } + }, + "required": [ + "address" + ] + } + } + }, + "required": [ + "name", + "servers", + "nameInConfig" + ] + } + }, + "required": [] +} +``` + +### Add the location.json file details + +The schema defined in **location.json** is used to create a user interface for defining multiple location blocks. To understand how this works with the template data, look at the [base.tmpl](#base-tmpl) file and find the section that begins with: + +> ``` go +> {{ range $locationIndex, $location := $server.locations}} +> ``` + +Since only one augment will be defined, it's important to link the input from the augment template to a specific location block among the many defined by the user. Duplicate location blocks are generally not useful. 
To achieve this, the `nameExpression` field in this schema serves as the key element, connecting the content from the augment template to a specific location block. You will see this used in the **location.tmpl** file of the augment template in a later step. + +1. Select **location.json**. +2. Copy and paste the following JSON schema into the **location.json** file editor. +3. Select **Save** (disk icon). + +```json +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "Locations", + "type": "object", + "properties": { + "templateInput": { + "type": [ + "object", + "null" + ], + "properties": { + "nameExpression": { + "title": "Location Label", + "type": "string", + "description": "Enter a unique label for the location. This label is displayed in the user interface and acts as a key for matching and applying augment input to the specific location.", + "examples": [ + "Users Endpoint" + ] + }, + "locationMatchExpression": { + "title": "Match Expression", + "type": "string", + "description": "The prefix to match request paths by.", + "examples": [ + "/users" + ] + } + }, + "required": [ + "nameExpression", + "locationMatchExpression" + ] + } + }, + "required": [] +} +``` + +--- + +## Create the augment template + +This section shows how to create an augment template that specifies additional configuration details not covered by the base template. + +To create the augment template, take the following steps: + +1. On the **Templates > Overview** page, select **Create**. +2. Select **New**. +3. Give the template a name. For this tutorial, we'll call the augment template **rr_aug_loc_proxy**. +4. Optionally, provide a description for the template. For example, **Round-Robin Location Proxy Augment**. +5. Select **Draft** for the template status. +6. Select **Augment** for the template type. +7. Select **Submit**. + +### Add the augment template files + +Here, you'll add the necessary Go template and JSON schema files to your augment template. + +1. 
On the **Template > Overview** page, select **rr_aug_loc_proxy**. +2. Add the config template file: + - Select **Add File**. + - Select **Config File**, then select **location.tmpl**. + - Select **Add**. +3. Add the schema files: + - Select **Add File**. + - Select **Schema File**, then select **location.json**. + - Select **Add**. + +Your augment template should now include the following files: + +{{List of template files including location.tmpl and location.json}} + +### Add the location.tmpl details + +This template determines which location block—among those defined by the template user in the NGINX Instance Manager web interface—should receive the specific configuration content. The essential part of the template that will appear in the final NGINX configuration is the line: + +> ``` go
> proxy_pass http://{{ $arguments.upstreamName }};
> ``` + +The first part of the template checks the location's label, `nameExpression`, and matches it with the data provided by the user to the augment template. It then assigns this data to `$arguments`, which is used to generate the final configuration snippet. This snippet is then injected only into the targeted location block. + +1. Select **location.tmpl**. +2. Copy and paste the following contents into the **location.tmpl** file editor. +3. Select **Save** (disk icon). 
+ +``` go +{{ $augmentData := .AugmentData.V1 }} +{{ $server := index .Args 1 }} +{{ $location := index .Args 0 }} +{{ $arguments := dict }} + +{{/* Get the location label from the base template */}} +{{ $locationLabel := $location.templateInput.nameExpression | trim | lower }} + +{{/* Check if there is a nameExpression (label) for this location ID */}} +{{ range $args := $augmentData.location.templateInput.locations }} + {{ $targetLabel := $args.targetLabel | trim | lower }} + {{ if (eq $targetLabel $locationLabel) }} + {{ $arguments = $args }} + {{ break }} + {{ end }} +{{ end }} + +{{/* If augment arguments related to this location were found, perform templating */}} +{{ if not (empty $arguments) }} + proxy_pass http://{{ $arguments.upstreamName }}; +{{ end }} +``` + +### Add the location.json details + +1. Select **location.json**. +2. Copy and paste the following contents into the **location.json** file editor. +3. Select **Save** (disk icon). + +```json +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "Reverse Proxy Augment", + "type": "object", + "properties": { + "templateInput": { + "type": [ + "object", + "null" + ], + "properties": { + "locations": { + "type": "array", + "title": "Locations", + "description": "To configure each location, the 'Label' you enter below must exactly match a 'Label' in the base template. This ensures the system can properly inject a reverse proxy directive targeting the specified upstream.", + "items": { + "type": "object", + "properties": { + "targetLabel": { + "title": "Target Location Label", + "type": "string", + "description": "Enter the label for this configuration's target location. 
It must exactly match the 'Location Label' from a base template location to ensure the system correctly injects the augment inputs into the configuration.",
+                "examples": [
+                  "Main Location"
+                ]
+              },
+              "upstreamName": {
+                "type": "string",
+                "title": "Upstream Name",
+                "description": "Name of the target upstream. Must match exactly.",
+                "examples": [
+                  "upstream_1"
+                ]
+              }
+            },
+            "required": ["targetLabel", "upstreamName"]
+          }
+        }
+      },
+      "required": ["locations"]
+    }
+  },
+  "required": []
+}
+```
+
+---
+
+## Generate and deploy the configuration {#generate-and-deploy-config}
+
+Lastly, generate and deploy your configuration.
+
+1. On the left navigation pane, select **Templates**.
+2. Find **rr_base_template** in the list of templates. In the **Actions** column, select the ellipsis (three dots), then select **Preview and Generate**.
+3. **Select the publication target**:
+    - Select whether you're publishing the configuration to an instance, instance group, existing saved config, or saving as a new staged config.
+    - Then select the specific target from the list. Or, if you're saving as a new staged config, provide the staged config name.
+    - Select **Next**.
+4. **Select the augment template**:
+    - On the **Choose Augments** form, select **rr_aug_loc_proxy**.
+    - Select **Next**.
+5. **Add HTTP Server(s)**:
+    - On the **HTTP Servers** form, select **Add HTTP Servers**.
+    - Enter a unique label for the server (for example, **Round Robin Proxy**). This label is displayed in the user interface and acts as a key for matching and applying augment input to the specific server.
+    - Enter a server name (for example, **example.com**).
+    - Add a server location:
+      - In the **Server Locations** pane, select **Add Server Locations**.
+      - Enter a unique label for the location (for example, **Users Endpoint**). This label is displayed in the user interface and acts as a key for matching and applying augment input to the specific location. 
We'll refer to this label when we configure the augment template inputs. + - Enter a match expression. This is the prefix to match request paths by (for example, **/users**). + - Select **Next**. +6. **Add HTTP Upstream(s)**: + + In NGINX, an *upstream* refers to a group of servers that handle client requests. They are typically used for load balancing. + + - On the **HTTP Upstreams** form, select **Add HTTP Upstreams**. + - Enter a unique label for the upstream (for example, **Users Service**). This label is displayed in the user interface and acts as a key for matching and applying augment input to the specific upstream. + - Enter an HTTP upstream name (for example, **users_backend**). We'll refer to this upstream name when we configure the augment template inputs. + - Add an upstream server: + - In the **Upstream Servers** pane, select **Add item**. + - Enter the upstream server address (for example, **192.0.0.1**) + - Enter the upstream server port (for example, **80**). + - Select **Next**. +7. **Add the rr_aug_loc_proxy inputs**. + - In the **Reverse Proxy Augment > Locations** pane, select **Add item**. + - Enter the label for the target location. This is the label that you specified in step 5 when adding the HTTP server location (for example, **Users Endpoint**). Make sure the labels match exactly to correctly apply the augment templates. + - Enter the upstream name. This is the name you specified in step 6 (for example, **users_backend**). Make sure the names match exactly. + - Select **Next**. +8. **Preview the config**: + + On the **Preview Config** page, the resulting config should look similar to the following example: + + ```nginx + # /******************************** !! IMPORTANT !! ********************************/ + # This is a Template generated configuration. Updates should done through the Template + # Submissions workflow. Manual changes to this configuration can/will be overwritten. 
+ # __templateTag={"uid":"3c4f0137-058f-4275-a8e2-1a1077ae8e8c","file":"base.tmpl","name":"rr_base_template"} + events { + worker_connections 1024; + } + http { + upstream users_backend { + server 192.0.0.1:80; + } + server { + server_name example.com; + location /users { + + # <<< BEGIN DO-NOT-EDIT location augment templates >>> + + # __templateTag={"uid":"8c903bd1-c11f-4820-8f0b-d12418a41bad","file":"location.tmpl","name":"rr_aug_loc_proxy"} + include /etc/nginx/augments/location/base_http-server1_loc1_3c4f0137_8c903bd1-c11f-4820-8f0b-d12418a41bad.conf; + + # <<< END DO-NOT-EDIT location augment templates >>> + + } + } + } + ``` + +9. If the configuration looks correct, select **Publish** to deploy it, or select **Save** if you're targeting a staged configuration. + +--- + +## Verification steps + +If you targeted an NGINX instance or instance group: + +1. Open a secure connection to your instance(s) and log in. +2. Open the NGINX config (**/etc/nginx/nginx.conf**) with a text editor. The configuration file should match the config output you [previewed and generated in the previous section](#generate-and-deploy-config). +3. Next, look for a new file under */etc/nginx/augments/location/* directory with a name similar to **base_http-server1_loc1_[unique-id].conf**, where **[unique-id]** is a unique identifier string. Open this file in a text editor. +The contents should include the **proxy_pass** directive: + + ``` nginx + proxy_pass http://users_backend; + ``` + +
    + +If you targeted a staged config: + +1. On the left menu, select **Staged Configs**. +2. Choose the name of the staged configuration you saved. +3. In the file viewer, select **nginx.conf**. The configuration file should match the config output you [previewed and generated in the previous section](#generate-and-deploy-config). +4. Then, look for a file in the */etc/nginx/augments/location/* directory with a name similar to **base_http-server1_loc1_[unique-id].conf**, where **[unique-id]** is a unique identifier string. The contents should include the **proxy_pass** directive: + + ``` nginx + proxy_pass http://users_backend; + ``` + +--- + +## References + +- [Understanding Config Templates]({{< relref "/nim/nginx-configs/config-templates/concepts/config-templates.md" >}}) +- [About Augment Templates]({{< relref "/nim/nginx-configs/config-templates/concepts/augment-templates.md" >}}) +- [Manage NGINX Configs with Config Templates]({{< relref "/nim/nginx-configs/config-templates/how-to/manage-nginx-configs-with-templates.md" >}}) +- [RBAC for Templates and Template Submissions]({{< relref "/nim/nginx-configs/config-templates/how-to/rbac-config-templates-and-submissions.md" >}}) diff --git a/content/nim/nginx-configs/publish-configs-version-control.md b/content/nim/nginx-configs/publish-configs-version-control.md new file mode 100644 index 000000000..062bd3479 --- /dev/null +++ b/content/nim/nginx-configs/publish-configs-version-control.md @@ -0,0 +1,141 @@ +--- +docs: DOCS-1348 +title: Publish NGINX configs with version control +toc: true +weight: 3 +--- + +## Overview + +With F5 NGINX Instance Manager, you can easily edit and publish NGINX configurations to your NGINX Open Source and NGINX Plus instances. This guide explains how to set up your version control system as the source of truth for maintaining NGINX configurations and how to publish modified configurations to any instance or instance group managed by NGINX Instance Manager. 
+ +The documentation includes examples of how to publish configurations from GitLab or [GitHub](https://github.com/nginxinc/git-automation) as the version control system to manage your configurations. + +--- + +## Before you begin + +To complete the instructions in this guide, ensure: + +- NGINX Instance Manager is installed, licensed, and running. +- You have one or more NGINX data plane instances or instance groups. +- A version control system maintains your configuration files for at least one instance or instance group. +- A Docker base [image](https://github.com/nginxinc/git-automation/blob/main/dockerfile) is available to run in the pipeline. + +--- + +## Publishing configurations from version control system + +### Set up the pipeline + +To set up the pipeline to push configuration changes upstream to an instance or instance group, you will define variables for sending API requests to NGINX Instance Manager, prepare the payload for the requests, and define rules to trigger the action using [CI/CD Pipelines](https://docs.gitlab.com/ee/ci/pipelines/). + +Add a `.gitlab-ci.yaml` file at the root directory of your repository to manage the configuration files. You can define variables to reference in the pipeline for various purposes, such as sending [curl](https://curl.se/) requests to NGINX Instance Manager. + +Here are some example variables you might define: + +- **CTRL_IP**: System IP of NGINX Instance Manager. Example: `198.51.100.1`. +- **AUTH_TOKEN**: Authorization token for connecting to NGINX Instance Manager. Example: `YWRtaW46VGVzdGVudjEyIw==`. +- **SYSTEM_UID**: System UID of the instance to push configuration changes. Example: `fbf7a63f-a394-34b7-8775-93d7d6aceb82`. +- **NGINX_UID**: NGINX UID of the instance to push configuration changes. Example: `98961494-c999-515c-ae1b-1dd949f78b6e`. +- **GROUP_UID**: Instance Group UID of the instance group to push configuration changes. Example: `0ba1d2c3-ce36-44da-a786-94fb65425a30`. 
+ +You can introduce rules in the `.gitlab-ci.yaml` file to trigger the pipelines when changes are detected in the configuration files. Here’s an example repository structure: + +```none +git-automation: + -> instances + -> nginx.conf + -> mime.types + -> instance-group + -> nginx.conf + -> mime.types + .gitlab-ci.yaml +``` + +To run the pipeline only when changes are detected in the instance directory, you could use the following rule: + +```yaml +._run_only_when_change_in_instance_directory: &_run_only_when_change_in_instance_directory + changes: + - instance/nginx.conf + - instance/* +``` + +### Define pipeline stages + +We need to add different pipeline [stages](https://docs.gitlab.com/ee/ci/yaml/?query=stages#stages), define when the actions will be triggered using rules, and what to run in the pipeline: + +```yaml +stages: + - publish + +publish-config-to-instance: + image: ${BUILD_IMAGE} + stage: publish + script: + # Run the script that prepares the payload with config changes and required variables + # ./prepare-payload.sh ${CTRL_IP} ${AUTH_TOKEN} ${SYSTEM_UID} ${NGINX_UID} + only: + <<: *_run_only_when_change_in_instance_directory + needs: [] +``` + +### Prepare the payload + +We use a bash script to create a valid payload and send it via `POST` to NGINX Instance Manager for the instance or instance group. To prepare the payload, encode the file contents of `nginx.conf`, get the current time in the format `Year-Month-DayTHour:Minute:SecondZ`, and assign the commit SHA to `externalId`. + +You can find sample scripts for preparing payloads for [instances](https://github.com/nginxinc/git-automation/blob/main/prepare-payload.sh) and [instance groups](https://github.com/nginxinc/git-automation/blob/main/prepare-instGroup-payload.sh). 
+
+#### Example bash script
+
+```bash
+#!/bin/bash
+set -o pipefail
+
+DEFAULT_INSTANCE_CONFIG_FILE_PATH="./instance/nginx.conf"
+DEFAULT_MIME_TYPES_FILE_PATH="./instance/mime.types"
+
+publish_config_to_instance() {
+  local ctrl_ip=$1
+  local auth_token=$2
+  local system_uid=$3
+  local nginx_uid=$4
+
+  if [ -z "${ctrl_ip}" ] || [ -z "${auth_token}" ] || [ -z "${system_uid}" ] || [ -z "${nginx_uid}" ]; then
+    echo "Missing required variable"
+    exit 1
+  fi
+
+  ic_base64=$(base64 < "${DEFAULT_INSTANCE_CONFIG_FILE_PATH}" | tr -d '\n')
+  mime_base64=$(base64 < "${DEFAULT_MIME_TYPES_FILE_PATH}" | tr -d '\n')
+  update_time=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
+  version_hash="${CI_COMMIT_SHA}"
+
+  payload=$(jq -n --arg versionHash "${version_hash}" --arg updateTime "${update_time}" --arg config "${ic_base64}" --arg mime "${mime_base64}" '{
+    "auxFiles": { "files": [], "rootDir": "/" },
+    "configFiles": {
+      "rootDir": "/etc/nginx",
+      "files": [
+        { "contents": $config, "name": "/etc/nginx/nginx.conf" },
+        { "contents": $mime, "name": "/etc/nginx/mime.types" }
+      ]
+    },
+    "updateTime": $updateTime,
+    "externalId": $versionHash,
+    "externalIdType": "git"
+  }')
+
+  echo "${payload}" | curl -k -H 'Content-Type: application/json' -H "Authorization: Bearer ${auth_token}" --data-binary @- -X POST "https://$ctrl_ip/api/platform/v1/systems/$system_uid/instances/$nginx_uid/config"
+}
+
+publish_config_to_instance "$@"
+```
+
+---
+
+### Publish config changes from version control system
+
+To publish configuration changes, modify the configuration files (`nginx.conf`) and push your changes upstream to trigger the pipeline. The process is the same whether you are pushing changes to individual instances or to instance groups. You can modify the script and pipeline rules based on whether you're targeting instances or instance groups.
+
+{{< note >}}You can find a sample template to modify as required in our [public repository](https://github.com/nginxinc/git-automation/).{{
    }} diff --git a/content/nim/nginx-configs/publish-configs.md b/content/nim/nginx-configs/publish-configs.md new file mode 100644 index 000000000..cf4b84a04 --- /dev/null +++ b/content/nim/nginx-configs/publish-configs.md @@ -0,0 +1,222 @@ +--- +docs: DOCS-822 +doctypes: +- tutorial +tags: +- docs +title: Publish NGINX configs +toc: true +weight: 2 +--- + +## Overview + +With F5 NGINX Instance Manager, you can easily edit and publish NGINX configurations to your NGINX and F5 NGINX Plus instances. As you edit your configurations, the NGINX config analyzer will automatically detect and highlight errors, ensuring accuracy and reliability. + +--- + +## Before you begin + +To complete the instructions in this guide, ensure: + +- NGINX Instance Manager is installed, licensed, and running. +- You have one or more NGINX data plane instances. + +{{< call-out "tip" "Interacting with the API">}} You can use tools such as `curl` or [Postman](https://www.postman.com) to interact with the Instance Manager REST API. The API URL follows the format `https:///api/nim/` and must include authentication information with each call. For more information about authentication options, refer to the [API Overview]({{< relref "/nim/fundamentals/api-overview.md" >}}).{{}} + +--- + +## Publishing to instances + +### Publish config changes to instances {#publish-configs} + +To edit an instance's configuration and publish the changes, follow these steps: + +1. On the left menu, select **Instances**. +2. Select the instance you want to publish a configuration update to. +3. Select **Edit Config**. +4. Make your changes to the configuration files. The config analyzer will highlight any errors. +5. Select **Publish** to apply the changes and publish them to the instance. + +### Publish configs with hash versioning (API only) {#publish-configs-instances-hash-versioning} + +With the Instance Manager REST API, you can add a commit hash to NGINX configurations if you use version control, such as Git. 
This allows you to retrieve a configuration with a unique version identifier. + +#### HTTP Request (POST) + +To add a commit hash to a new or existing config using the REST API, send an HTTP `POST` or `PUT` request to the Configs endpoint. + +- **Method**: `POST` or `PUT` +- **Endpoint**: `/systems/{systems_uid}/instances/{instance_uid}/config` + +#### Parameters + +When adding version control identifiers to a config, include the following parameters: + +- **externalID** (optional): The commit hash, 1–150 characters. For example, `521747298a3790fde1710f3aa2d03b55020575aa`. +- **externalIdType** (optional): The type of commit used for the config update. Possible values are `git` or `other`. If `externalID` isn't specified, the type defaults to `other`. + +#### Example JSON Request + +```json +{ + "auxFiles": { + "files": [], + "rootDir": "/" + }, + "configFiles": { + "rootDir": "/etc/nginx", + "files": [ + { + "contents": "base64_encoded_contents", + "name": "/etc/nginx/nginx.conf" + } + ] + }, + "updateTime": "2023-02-22T17:10:02.677Z", + "externalId": "521747298a3790fde1710f3aa2d03b55020575aa", + "externalIdType": "git" +} +``` + +{{}} If you edit an NGINX configuration in the Instance Manager web interface or directly on the data plane, previous hashed commit information will be lost: `externalID` will revert to `null` and `externalIdType` will revert to `other` automatically. {{}} + +#### HTTP Request (GET) + +To view the latest version of a configuration, send an HTTP `GET` request to the Systems endpoint. + +- **Method**: `GET` +- **Endpoint**: `/systems/{systems_uid}/instances/{instance_uid}` + +To view a specific configuration with a version-controlled hash, send an HTTP `GET` request to the Configs endpoint and specify the `externalID`. 
+ +- **Method**: `GET` +- **Endpoint**: `/systems/{systems_uid}/instances/{instance_uid}/config?externalId={hash}` + +#### Example JSON Response + +```json +{ + "build": { + "nginxPlus": true, + "release": "nginx-plus-r28", + "version": "1.23.2" + }, + "configPath": "/etc/nginx/nginx.conf", + "configVersion": { + "instanceGroup": { + "createTime": "0001-01-01T00:00:00Z", + "uid": "", + "versionHash": "" + }, + "versions": [ + { + "createTime": "2023-02-28T19:54:58.735Z", + "externalId": "521747298a3790fde1710f3aa2d03b55020575aa", + "externalIdType": "git", + "uid": "92a9dbfa-dc6a-5bf9-87dd-19405db0b9c0", + "versionHash": "c0f7abbd9b9060c75985b943f3ec0cfc7e4b28cbf50c26bfd0b2141bb6c277a3" + } + ] + } +} +``` + +--- + +## Publishing to instance groups {#publish-to-instance-groups} + +### Publish config changes to instance groups + +To publish a configuration file to an instance group: + +1. On the left menu, select **Instance Groups**. +2. Select the instance group you want to publish the configuration to. +3. To add a new config: + - Select **Add File**. + - Add the path and filename of the new file. + - Select **Create**. + - On the file editor page, type or paste the contents of the new file. The config analyzer will highlight any errors. +4. To update an existing config, make your changes in the editor. The config analyzer will highlight any errors. +5. Select **Publish** to apply the changes and publish them to the instance group. + +### Publish configs with hash versioning (API only) {#publish-configs-instance-groups-hash-versioning} + +You can add a commit hash to NGINX configurations for instance groups using the Instance Manager REST API. + +#### HTTP Request (POST) + +To add a commit hash to a new or existing config for an instance group, send an HTTP `POST` or `PUT` request to the Configs endpoint. 
+ +- **Method**: `POST` or `PUT` +- **Endpoint**: `/instance-groups/{instanceGroupUID}/config` + +#### Example JSON Request + +```json +{ + "auxFiles": { + "files": [], + "rootDir": "/" + }, + "configFiles": { + "rootDir": "/etc/nginx", + "files": [ + { + "contents": "base64_encoded_contents", + "name": "/etc/nginx/nginx.conf" + } + ] + }, + "updateTime": "2023-02-22T17:10:02.677Z", + "externalId": "8acf5aed9d2872b266d2f880cab23a4aa5791d1b", + "externalIdType": "git" +} +``` + +#### HTTP Request (GET) + +To view an instance group's config, send an HTTP `GET` request to the Instance Groups endpoint. + +- **Method**: `GET` +- **Endpoint**: `/instance-groups/{instanceGroupUID}` + +To view an instance group's config with a version-controlled hash, send an HTTP `GET` request to the Instance Groups endpoint and specify the `externalID`. + +- **Method**: `GET` +- **Endpoint**: `/instance-groups/{instanceGroupUID}/config?externalId={commit_hash}` + +#### Example JSON response + +```json +{ + "build": { + "nginxPlus": true, + "release": "nginx-plus-r28", + "version": "1.23.2" + }, + "configPath": "/etc/nginx/nginx.conf", + "configVersion": { + "instanceGroup": { + "createTime": "0001-01-01T00:00:00Z", + "uid": "", + "versionHash": "" + }, + "versions": [ + { + "createTime": "2023-02-28T19:54:58.735Z", + "externalId": "8acf5aed9d2872b266d2f880cab23a4aa5791d1b", + "externalIdType": "git", + "uid": "92a9dbfa-dc6a-5bf9-87dd-19405db0b9c0", + "versionHash": "c0f7abbd9b9060c75985b943f3ec0cfc7e4b28cbf50c26bfd0b2141bb6c277a3" + } + ] + } +} +``` + +--- + +## Publishing staged configs + +For complete instructions on publishing staged configurations, see the [Stage NGINX configs]({{< relref "/nim/nginx-configs/stage-configs.md#publish-staged-configs" >}}) guide. 
diff --git a/content/nim/nginx-configs/stage-configs.md b/content/nim/nginx-configs/stage-configs.md new file mode 100644 index 000000000..34bb65b90 --- /dev/null +++ b/content/nim/nginx-configs/stage-configs.md @@ -0,0 +1,158 @@ +--- +docs: DOCS-1277 +doctypes: +- task +tags: +- docs +title: Stage NGINX configs +toc: true +weight: 1 +--- + +## Overview + +With F5 NGINX Instance Manager, you can easily pre-configure and stage NGINX configuration files, so you can quickly publish them to individual NGINX instances or instance groups whenever you're ready. + +## Before you begin + +To complete the instructions in this guide, ensure: + +- NGINX Instance Manager is installed, licensed, and running. + +{{}} You can use tools such as `curl` or [Postman](https://www.postman.com) to interact with the Instance Manager REST API. The API URL follows the format `https:///api/nim/` and must include authentication information with each call. For more information about authentication options, refer to the [API Overview]({{< relref "/nim/fundamentals/api-overview.md" >}}). {{}} + +--- + +## Stage an instance's config + +To stage an NGINX instance's configuration for publication to other instances or instance groups, follow these steps: + +1. On the left menu, select **Instances**. +2. On the Instances Overview page, select an instance in the list that has the configuration you want to stage. +3. Select **Edit Config**. +4. Make your desired changes to the instance's config files. The config analyzer will let you know if there are any errors. +5. To stage your changes so you can publish them later, select **Save as** and provide a name for the staged config. +6. Select **Save**. +7. To view the staged config, select **Staged Configs** on the left menu. The staged config should appear in the list. + +--- + +## Create a new staged config + +Follow these steps to create a new staged configuration: + +1. On the left menu, select **Staged Configs**. +2. Select **Create**. +3. 
On the Create Config form, complete the necessary fields: + - **Config name**: Give the staged config a name. + - **Config files root directory**: Specify the root directory where configuration files can be written to and read from. The default is `/etc/nginx/`. + - **Aux files root directory**: Specify the root directory where auxiliary files can be written to and read from. The default is `/etc/nginx/aux`. +4. Select **Create**. +5. On the Overview page, select the staged config you just created. +6. Select **Add File**. +7. Specify the file path and file name. For example, `/etc/nginx/conf.d/default.conf`. +8. Select **Create**. +9. Type or paste the contents of the file in the configuration editor. +10. Repeat steps 6–9 to add other files. +11. Select **Save** when you're done. + +--- + +## Update a staged configuration + +To update a staged configuration: + +1. On the left menu, select **Staged Configs**. +2. From the list of staged configs, select one you want to update. +3. Edit the staged configuration files as needed. The config analyzer will let you know if there are any errors. +4. Select **Save** to save the configuration. + +--- + +## Publishing staged configs {#publish-staged-configs} + +To publish a staged configuration to an NGINX instance or instance group: + +1. On the left menu, select **Staged Configs**. +2. From the list of staged configs, select the one you want to publish. +3. Select **Publish to**. +4. Select the NGINX instance or instance group to publish the staged config to. +5. Select **Publish**. + +### Publish configs with hash versioning (API only) {#hash-versioning-staged-configs} + +With the Instance Manager REST API, you can add a commit hash to NGINX configurations if you use version control, such as Git. This allows you to retrieve a configuration with a unique version identifier. 
+ +#### HTTP request (POST) + +To add a commit hash to a new or staged config using the REST API, send an HTTP `POST` or `PUT` request to the Configs endpoint. + +- **Method**: `POST` or `PUT` +- **Endpoint**: `/configs` + +#### Parameters + +When adding version control identifiers to a config, include the following parameters: + +- **externalID** (optional): The commit hash, 1–150 characters. For example, `521747298a3790fde1710f3aa2d03b55020575aa`. +- **externalIdType** (optional): The type of commit used for the config update. Possible values are `git` or `other`. If `externalID` isn't specified, the type defaults to `other`. + +#### Example JSON request + +```json +{ + "configName": "test-staged", + "configFiles": { + "rootDir": "/etc/nginx", + "files": [ + { + "contents": "base64_encoded_contents", + "name": "/etc/nginx/nginx.conf" + } + ] + }, + "auxFiles": { + "rootDir": "/etc/nginx/aux", + "files": [] + }, + "externalId": "8acf5aed9d2872b266d2f880cab23a4aa5791d1b", + "externalIdType": "git" +} +``` + +{{}} If you edit an NGINX configuration in the Instance Manager web interface or directly on the data plane, previous hashed commit information will be lost: `externalID` will revert to `null` and `externalIdType` will revert to `other` automatically.{{}} + +#### HTTP request (GET) + +To view a staged config, send an HTTP `GET` request to the Configs endpoint. + +- **Method**: `GET` +- **Endpoint**: `/configs/{config_uid}` + +To view a staged config with a version-controlled hash, send an HTTP `GET` request to the Configs endpoint and specify the `externalID`. 
+ +- **Method**: `GET` +- **Endpoint**: `/configs/{config_uid}?externalId={commit_hash}` + +#### Example JSON response + +```json +{ + "configName": "test-staged", + "configFiles": { + "rootDir": "/etc/nginx", + "files": [ + { + "contents": "base64_encoded_contents", + "name": "/etc/nginx/nginx.conf" + } + ] + }, + "auxFiles": { + "rootDir": "/etc/nginx/aux", + "files": [] + }, + "externalId": "8acf5aed9d2872b266d2f880cab23a4aa5791d1b", + "externalIdType": "git" +} +``` diff --git a/content/nim/nginx-instances/_index.md b/content/nim/nginx-instances/_index.md new file mode 100644 index 000000000..84061a18b --- /dev/null +++ b/content/nim/nginx-instances/_index.md @@ -0,0 +1,5 @@ +--- +title: NGINX instances +weight: 80 +url: /nginx-instance-manager/nginx-instances/ +--- \ No newline at end of file diff --git a/content/nim/nginx-instances/add-tags.md b/content/nim/nginx-instances/add-tags.md new file mode 100644 index 000000000..df3b51ff8 --- /dev/null +++ b/content/nim/nginx-instances/add-tags.md @@ -0,0 +1,58 @@ +--- +description: Follow the steps in this guide to create tags for organizing your instances. +docs: DOCS-829 +doctypes: +- tutorial +tags: +- docs +title: Organize instances with tags +toc: true +weight: 650 +--- + +## Overview + +Tags allow you to label and group NGINX instances in F5 NGINX Instance Manager. + +## Adding a Tag {#add-tag} + +There are two ways you can add tags: + +- Add tags to the `agent-dynamic.conf` file. +- Add tags using the Instance Manager web interface. + +### Add Tags with a Configuration File {#add-tag-config} + +You can edit the `agent-dynamic.conf` file to add a tag to an instance. + +These tags show up during registration and can't be removed from the web interface -- if you try to remove these tags in the web interface, the NGINX Agent adds them back when restarting. + +To add tags to the configuration file, take the following steps: + +1. 
Edit the `/var/lib/nginx-agent/agent-dynamic.conf` file and add the tags in a list under the key `tags:` + + {{}}If you're running Instance Manager 2.10.1 or earlier or NGINX Agent 2.25.1 or earlier, the `agent-dynamic.conf` file is located in `/etc/nginx-agent/`.{{}} + +2. Restart the NGINX Agent service: + + ```bash + sudo systemctl restart nginx-agent + ``` + +### Add Tags using the Web Interface {#add-tags-UI} + +To add tags using the Instance Manager web interface, take the following steps: + +1. In a web browser, go to the FQDN for your NGINX Management Suite host and log in. + +1. In the left menu, select **Instances**. + +1. Select an instance row in the list of instances. An informational panel for the instance is displayed. + +1. Select **Edit**. + +1. In the **System Tags** box, select one or more tags to associate with the instance. To add a new tag, type the name for the tag, then select **Add New Tag**. + + - You can remove a tag from an instance by clicking the `x` next to the tag's name. + +1. Select **Save**. diff --git a/content/nim/nginx-instances/manage-certificates.md b/content/nim/nginx-instances/manage-certificates.md new file mode 100644 index 000000000..2beeb7a4d --- /dev/null +++ b/content/nim/nginx-instances/manage-certificates.md @@ -0,0 +1,188 @@ +--- +docs: DOCS-821 +doctypes: +- tutorial +tags: +- docs +title: Manage SSL certificates +toc: true +weight: 650 +--- + +{{< include "nim/decoupling/note-legacy-nms-references.md" >}} + +## About certificates {#about-certificates} + +You can add certificates to **F5 NGINX Instance Manager** using the web interface or the REST API. Certificates in NGINX Instance Manager are stored in PEM format in an internal secret store. They can be published to NGINX instances, which use certificates to encrypt and decrypt requests and responses. + +NGINX Instance Manager can import the following types of certificates: + +1. 
**PEM** (Privacy Enhanced Mail): A container format that includes an entire certificate chain, including the public key, private key, and any intermediate root certificates. +2. **PKCS12** (Public-Key Cryptography Standards): A container format with multiple embedded objects, such as multiple certificates. The contents are base64-encoded. + +--- + +## Add managed certificates + +You need to create a certificate before you can add one to NGINX Instance Manager. Use [OpenSSL](https://www.openssl.org) or a similar service to create the certificate. + +If you’re uploading a **PKCS12** certificate, make sure to encode it in base64 before adding it to NGINX Instance Manager. Use the following command to encode the certificate: + + ```bash + cat .pkcs12 | base64 > .pkcs12 + ``` + +To add a certificate to NGINX Instance Manager, take the following steps: + +1. Open the NGINX Instance Manager web interface and log in. +2. Under **Modules**, select **Instance Manager**. +3. In the left menu, select **Certificates**. +4. Select **Add**. +5. In the **Name** box, enter a name for the certificate. +6. Choose the import method: + - **Import PEM or PKCS12 file**: Drag and drop the certificate file into the upload section, or select **Browse** to find and upload the file. + - **Copy and paste PEM text**: Paste the appropriate certificate contents into the **Private Key**, **Public Certificate**, and **Issuing CA Certificates** boxes. +7. Select **Add**. + +--- + +## Identify expiring or expired certificates {#identify-expiring-or-expired-certificates} + +To identify certificates that are expired or expiring soon: + +1. Open the NGINX Instance Manager web interface and log in. +2. Under **Modules**, select **Instance Manager**. +3. In the left menu, select **Certificates**. + +You will see the status of certificates as either `Expired`, `Expiring`, or `Healthy`, along with the expiration date. A certificate is considered `Expiring` if it will expire in fewer than 30 days. 
+ +To update expiring or expired certificates, select **Edit** and provide the new certificate details. + +--- + +## Replace managed certificates + +#### Web interface + +To replace a certificate using the web interface: + +1. Open the NGINX Instance Manager web interface and log in. +2. Under **Modules**, select **Instance Manager**. +3. In the left menu, select **Certificates**. +4. Select the certificate you want to replace, then select **Edit**. +5. Paste the appropriate certificate contents into the **Private Key**, **Public Certificate**, and **Issuing CA Certificates** boxes. +6. Select **Save**. + +#### API + +{{}}{{< include "nim/how-to-access-nim-api.md" >}}{{}} + +To replace a certificate using the NGINX Instance Manager REST API, send a `PUT` request like the following to the Certificates API endpoint: + +```bash +curl -X PUT "https://nginx-manager.example.com/api/platform/v1/certs/pem_cert_with_ca" \ + -H "accept: application/json" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "pem_cert_with_ca", + "certPEMDetails": { + "type": "PEM", + "privateKey": "-----BEGIN PRIVATE KEY----------END PRIVATE KEY-----", + "publicCert": "-----BEGIN CERTIFICATE----------END CERTIFICATE-----", + "password": "", + "caCerts": [ + "-----BEGIN CERTIFICATE----------END CERTIFICATE-----" + ] + }, + "instanceRefs": [ + "/api/platform/v1/systems//instances/" + ] + }' + ``` + +--- + +## Delete managed certificates {#delete-certs} + +#### Web interface + +To delete a certificate using the web interface: + +1. Open the NGINX Instance Manager web interface and log in. +2. Under **Modules**, select **Instance Manager**. +3. In the left menu, select **Certificates**. +4. Select the certificate you want to delete, then select **Delete**. 
+ +#### API + +To delete a certificate using the NGINX Instance Manager REST API, send a `DELETE` request like the following to the Certificates API endpoint: + +```bash +curl -X DELETE "https://nginx-manager.example.com/api/platform/v1/certs/pem_cert_with_ca" \ + -H "accept: application/json" +``` + +--- + +## Convert remote certificates to managed certificates + +#### API + +To convert a remote certificate to a managed certificate using the NGINX Instance Manager REST API, send a `PUT` request to the Certificates API endpoint. This request should include both the public certificate and private key, like in the following example: + +```bash +curl -X PUT "https://nginx-manager.example.com/api/platform/v1/certs/pem_cert_with_ca" \ + -H "accept: application/json" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "pem_cert_with_ca", + "certPEMDetails": { + "type": "PEM", + "privateKey": "-----BEGIN PRIVATE KEY----------END PRIVATE KEY-----", + "publicCert": "-----BEGIN CERTIFICATE----------END CERTIFICATE-----", + "password": "", + "caCerts": [ + "-----BEGIN CERTIFICATE----------END CERTIFICATE-----" + ] + }, + "instanceRefs": [] + }' + ``` + +--- + +## Rotate encryption keys {#rotate-encryption-keys} + +To manage certificates securely, you should rotate encryption keys regularly or when a key is compromised. + +{{< call-out "important" "Service Disruption Warning" "fas fa-exclamation-triangle" >}}You must stop the `nms-core` service to rotate keys. Stopping this service is disruptive, so you should plan a downtime window for the rotation.{{< /call-out >}} + +To rotate the certificate encryption key: + +1. Open an SSH connection to the F5 NGINX Management Suite host. +2. Run the following command to stop the `nms` service: + + ```bash + sudo systemctl stop nms + ``` + +3. Run the following command to rotate the encryption keys: + + ```bash + sudo runuser -u nms -- nms-core secret rotate + ``` + + This command performs the following steps: + + 1. 
Generates a new 256-bit encryption key. + 2. Reads the certificates using the old key. + 3. Generates a new 192-bit salt. + 4. Re-encrypts the certificates with the new key and salt. + 5. Repeats steps 2–4 for all certificates. + 6. Deletes the old encryption key. + +4. Now that you've rotated encryption keys, start the `nms` service: + + ```bash + sudo systemctl start nms + ``` diff --git a/content/nim/nginx-instances/manage-instance-groups.md b/content/nim/nginx-instances/manage-instance-groups.md new file mode 100644 index 000000000..2524c3e28 --- /dev/null +++ b/content/nim/nginx-instances/manage-instance-groups.md @@ -0,0 +1,234 @@ +--- +description: Learn how to use F5 NGINX Management Suite Instance Manager to create instance + groups, which you can use to manage multiple NGINX instances as a single entity. +docs: DOCS-935 +doctypes: +- tutorial +tags: +- docs +title: Create and manage instance groups +toc: true +weight: 600 +--- + +## Overview + +You can easily manage multiple NGINX instances as a single entity by creating an instance group in Instance Manager and adding NGINX instances to it. + +--- + +## Before You Begin + +To complete the instructions in this guide, you need the following: + +- An installed version of [Instance Manager]({{< relref "/nim/deploy/vm-bare-metal/_index.md" >}}) +- One or more NGINX data plane instances + +--- + +## Create Instance Groups {#add-instance-groups} + +To create an instance group: + +1. {{< include "nim/webui-nim-login.md" >}} +2. On the left navigation menu, select **Instance Groups**. +3. Select **Create**. +4. On the **Create Instance Group** form, complete the necessary fields: + + - **Name**: add a name for the instance group. + - **Display Name**: add a friendly name to show for the instance group. + - (Optional) **Description**: add a brief description for the instance group. + +5. Select **Save**. + +{{}} +When an Instance Group is initially created via the UI/API, its NGINX config will be empty. 
Adding an Instance to the Instance Group will populate
+the Instance Group NGINX config with the first member's NGINX config.
+{{}}
+
+---
+
+## Add Instances to Instance Groups
+
+You can assign NGINX instances to instance groups in the following ways:
+
+- (Preferred) Edit the `agent-dynamic.conf` file on an NGINX instance and specify the instance group.
+- Alternatively, when installing the NGINX Agent, you can specify the instance group as a command-line option.
+
    + +{{}} + +{{%tab name="agent-dynamic.conf"%}} + +### Specify Instance Group in Agent-Dynamic.Conf + +You can easily add instances to a default instance group that you specify. To do so, [install the NGINX Agent on an instance]({{< relref "/nms/nginx-agent/install-nginx-agent.md" >}}), then edit the `/var/lib/nginx-agent/agent-dynamic.conf` file as described below. + +{{}}If you're running Instance Manager 2.10.1 or earlier or NGINX Agent 2.25.1 or earlier, the `agent-dynamic.conf` file is located in `/etc/nginx-agent/`.{{}} + +{{< important >}}If the specified instance group doesn't already exist, the NGINX Agent installer will create it, using the current instance's config file as the group's config file. This means that all instances added to the group later will use this config as well. If you're using a script to add instances, you should consider carefully which instance to run the script on first.{{< /important >}} + +1. Open a secure shell (SSH) connection to the NGINX instance and log in. +2. Open the `/var/lib/nginx-agent/agent-dynamic.conf` for editing. +3. Add a value for `instance_group: `, similar to the following example: + + Example: + + ```yaml + # + # /etc/nginx-agent/dynamic-agent.conf + # + # Dynamic configuration file for NGINX Agent. + # + # The purpose of this file is to track agent configuration + # values that can be dynamically changed via the API and the agent install script. + # You may edit this file, but API calls that modify the tags on this system will + # overwrite the tag values in this file. + # + # The agent configuration values that API calls can modify are as follows: + # - tags + # + # The agent configuration values that the agent install script can modify are as follows: + # - instance_group + + instance_group: default + ``` + +4. Save the changes and exit the editor. +5. Restart the NGINX Agent: + + ```bash + sudo systemctl restart nginx-agent + ``` + +To verify an instance was added to an instance group: + +1. 
{{< include "nim/webui-nim-login.md" >}} +2. On the left menu, select **Instance Groups**. +3. Your instance group should be displayed in the list with the assigned instances. + +{{%/tab%}} + +{{%tab name="Command-Line Option"%}} + +### Adding Instances to Instance Groups with NGINX Agent {#add-instance-agent-install} + +To add an instance to an instance group when installing the NGINX Agent: + +1. Open a secure shell (SSH) connection to the NGINX instance and log in. +2. Download the NGINX Agent installation script: + + ```bash + curl https:///install/nginx-agent > install.sh + ``` + +3. Install the NGINX Agent and specify the instance group by using the `--instance_group` flag: + + ```bash + sudo sh ./install.sh --instance-group + ``` + + For example, the following command adds the instance to an instance group called `nginx-01`. + + ```bash + sudo sh install.sh --instance-group nginx-01 + ``` + +{{< important >}} +If the specified instance group doesn't already exist, the NGINX Agent installer will create it, using the current instance's NGINX config as the group's config file. This means that all instances added to the group later will use this config as well. If you're using a script to add instances, you should consider carefully which instance to run the script on first. +{{< /important >}} + +{{%/tab%}} +{{}} + + +--- + +## Update Instance Groups + +To edit the display name or description for an instance group: + +1. {{< include "nim/webui-nim-login.md" >}} +2. On the left menu, select **Instance Groups**. +3. Locate the instance group you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit**. + +--- + +## Delete Instance Groups + +To delete an instance group in the web interface, perform the following: + +1. {{< include "nim/webui-nim-login.md" >}} +2. On the left menu, select **Instance Groups**. +3. Locate the instance group you want to delete. 
Select the **Actions** menu (represented by an ellipsis, `...`), then select **Delete**. + +If the instance group you deleted was specified in the `agent-dynamic.conf` file of an instance, you'll need to remove the reference. Otherwise, upon restarting the NGINX Agent, the instance group will be recreated. + +1. Open a secure shell (SSH) connection to the NGINX instance and log in. +2. Open the `/var/lib/nginx-agent/agent-dynamic.conf` for editing. +3. Locate and remove or comment out the `instance_group: ` setting, similar to the following example: + + Example: + + ```yaml + # + # /var/lib/nginx-agent/agent-dynamic.conf + # + # Dynamic configuration file for NGINX Agent. + # + # The purpose of this file is to track agent configuration + # values that can be dynamically changed via the API and the agent install script. + # You may edit this file, but API calls that modify the tags on this system will + # overwrite the tag values in this file. + # + # The agent configuration values that API calls can modify are as follows: + # - tags + # + # The agent configuration values that the agent install script can modify are as follows: + # - instance_group + + # instance_group: default + ``` + +4. Save the changes and exit the editor. +5. Restart the NGINX Agent: + + ```bash + sudo systemctl restart nginx-agent + ``` + +--- + +## Permission for Instance Groups + +See [Set Up RBAC]({{< relref "/nim/admin-guide/rbac/overview-rbac.md" >}}), for detail information on setting up role-based access control (RBAC) for Instance Groups. + +{{}} +Members of Instance Group automatically inherit role-based access control (RBAC) permissions from their parent. +{{}} + +## Publishing to Instance Groups + +- For instructions on publishing to instance groups, see the topic [Publish NGINX configs]({{< relref "/nim/nginx-configs/publish-configs.md" >}}). 
+
+## Additional Information Regarding Instance Groups
+
+When updating Instance Group NGINX config using the UI or API, only the currently "online" members of the Instance Group will be affected. Newly registered or
+reconnected Instances will automatically get the NGINX config updated to the last "successful" published NGINX config.
+
+An NGINX config update to an Instance Group is considered "successful" when one of the following conditions is met:
+
+- The Instance Group does not have a member Instance online
+- Any Instance Group member reported "successful" for the NGINX config update
+
+{{}}
+Check the Instance details page for the last NGINX config publish status.
+{{}}
+
+### Common Usage of Instance Groups
+
+Instance Groups can be used for the following workflows:
+
+- Preset NGINX config for new Instances, for example, containerized Instances
+- Group permissions for a set of Instances that share the same NGINX config
diff --git a/content/nim/nginx-instances/scan-instances.md b/content/nim/nginx-instances/scan-instances.md
new file mode 100644
index 000000000..31fd53d7f
--- /dev/null
+++ b/content/nim/nginx-instances/scan-instances.md
@@ -0,0 +1,290 @@
+---
+description: Follow the steps in this guide to scan for and discover NGINX instances.
+docs: DOCS-828
+doctypes:
+- tutorial
+tags:
+- docs
+title: Scan and discover NGINX instances
+toc: true
+weight: 110
+---
+
+
+
+{{< shortversions "2.0.0" "latest" "nimvers" >}}
+
+## Prerequisites {#prerequisites}
+
+1. [Install Instance Manager]({{< relref "/nim/deploy/vm-bare-metal/_index.md" >}}).
+2. Start and enable Instance Manager.
+
+{{}}
+To update the CVE list manually or offline, refer to the [Offline Installation]({{}}) guide.
+{{}}
+
+Host discovery, the first stage in instance discovery, is used to enumerate live hosts on a given network. However, in certain environments, Internet Control Message Protocol (ICMP) echo requests are disabled.
The Instance Manager API provides a method for disabling host discovery in scanning. + +```shell +curl --request POST \ + --url https:///api/platform/v1/servers/scan \ + --header 'Authorization: Bearer .' \ + --header 'content-type: application/json' \ + --data '{"cidr": "192.0.2.0/24","hostDiscovery": "none","portRanges": ["80","443"]}' +``` + +If no host discovery options are provided, Instance Manager sends an ICMP echo request to each host on the network. + +## Scan using the web interface {#scan-ui} + +1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. +2. In the left menu, select **Scan**. +3. Enter subnets and masks that correspond to your network. + +{{}} +To scan a single address, use the netmask of `/32` after the IP. This is the equivalent of scanning a single subnet. If you enter the wrong subnet, the scan may take longer than expected before erroring. + +There's a CVE that's not reported for NGINX that involves [unfiltered logging](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2009-4487). This CVE won't be fixed, has a severity of "None," and is excluded from our scans' CVE list. +{{}} + +--- + +## Scan using the API {#scan-api} + +To start a scan using the Instance Manager API, send a POST request similar to the following example to the Scan endpoint, `https:///api/platform/v1/servers/scan`. 
+ +```shell +curl --request POST \ + --url https:///api/platform/v1/servers/scan \ + --header 'Authorization: Bearer ' \ + --header 'content-type: application/json' \ + --data '{"cidr": "192.0.2.0/24","portRanges": ["80","443"]}' +``` + +The response looks similar to the following example: + +```json +{ + "completionTime": "2021-12-02T00:27:34.517919044Z", + "duration": 13110, + "endpointsScanned": 508, + "nginxFound": 6, + "percentageComplete": 100, + "request": { + "cidr": "192.0.2.0/24", + "hostDiscovery": "icmp", + "portRanges": [ + "80", + "443" + ] + }, + "serversFound": 6, + "status": "complete" +} +``` + +To get the scanned servers, send a GET request to the Servers endpoint: + +```shell +curl -X GET "https:///api/v1/servers" -H "accept: application/json" -H 'Authorization: Bearer ' +``` + +The result looks similar to the following: + +
    + + Scan JSON response + +```json +{ + "items": [ + { + "certs": [], + "createTime": "2021-12-01T19:00:27.514Z", + "cves": [ + { + "advisory": "http://mailman.nginx.org/pipermail/nginx-announce/2021/000300.html", + "description": "1-byte memory overwrite in resolver", + "id": "2021-23017", + "severity": "medium" + } + ], + "links": [], + "network": [ + { + "ip": "192.168.56.2", + "port": 80 + } + ], + "serverApplication": "nginx", + "updateTime": "2021-12-01T19:00:27.514Z", + "vendor": "Ubuntu", + "version": "1.18.0" + }, + { + "certs": [], + "createTime": "2021-12-01T19:00:27.514Z", + "cves": [ + { + "advisory": "http://mailman.nginx.org/pipermail/nginx-announce/2021/000300.html", + "description": "1-byte memory overwrite in resolver", + "id": "2021-23017", + "severity": "medium" + } + ], + "links": [], + "network": [ + { + "ip": "192.168.56.1", + "port": 80 + } + ], + "serverApplication": "nginx", + "updateTime": "2021-12-01T19:00:27.514Z", + "vendor": "Ubuntu", + "version": "1.18.0" + }, + { + "certs": [], + "createTime": "2021-12-01T19:00:27.515Z", + "cves": [ + { + "advisory": "http://mailman.nginx.org/pipermail/nginx-announce/2021/000300.html", + "description": "1-byte memory overwrite in resolver", + "id": "2021-23017", + "severity": "medium" + } + ], + "links": [], + "network": [ + { + "ip": "192.168.56.2", + "port": 443 + } + ], + "serverApplication": "nginx", + "updateTime": "2021-12-01T19:00:27.515Z", + "vendor": "Ubuntu", + "version": "1.18.0" + }, + { + "certs": [ + { + "endpoint": { + "ip": "192.168.56.1", + "port": 443 + }, + "metadata": { + "authorityKeyIdentifier": "", + "commonName": "manager-server", + "expired": false, + "expiry": 3161225998, + "issuer": "C=US, ST=Washington, L=Seattle, Inc., O=F5\\, OU=Controller, CN=apigw-svc", + "publicKeyType": "RSA (2048 bit)", + "serialNumber": "290091060211653667347751276868955784795456843516", + "signatureAlgorithm": "SHA256-RSA", + "subject": "C=US, ST=Washington, L=Seattle, Inc., O=F5\\, 
OU=Controller, CN=manager-server", + "subjectAlternativeName": "manager-server", + "subjectKeyIdentifier": "", + "thumbprint": "E0:B3:53:81:59:28:B6:C5:88:41:11:8D:B3:E2:B1:C8:D4:32:C1:6B:46:8D:B4:45:37:75:2E:9B:29:C2:A1:70", + "thumbprintAlgorithm": "SHA-256", + "validFrom": "2021-11-20T06:48:59Z", + "validTo": "2122-02-04T06:48:59Z", + "version": 3 + } + } + ], + "createTime": "2021-12-01T19:00:27.516Z", + "links": [], + "network": [ + { + "ip": "192.168.56.1", + "port": 443 + } + ], + "serverApplication": "nginx", + "updateTime": "2021-12-01T19:00:27.516Z" + }, + { + "certs": [ + { + "endpoint": { + "ip": "192.168.56.3", + "port": 443 + }, + "metadata": { + "authorityKeyIdentifier": "", + "commonName": "manager-server", + "expired": false, + "expiry": 3161225998, + "issuer": "C=US, ST=Washington, L=Seattle, Inc., O=F5\\, OU=Controller, CN=apigw-svc", + "publicKeyType": "RSA (2048 bit)", + "serialNumber": "290091060211653667347751276868955784795456843516", + "signatureAlgorithm": "SHA256-RSA", + "subject": "C=US, ST=Washington, L=Seattle, Inc., O=F5\\, OU=Controller, CN=manager-server", + "subjectAlternativeName": "manager-server", + "subjectKeyIdentifier": "", + "thumbprint": "E0:B3:53:81:59:28:B6:C5:88:41:11:8D:B3:E2:B1:C8:D4:32:C1:6B:46:8D:B4:45:37:75:2E:9B:29:C2:A1:70", + "thumbprintAlgorithm": "SHA-256", + "validFrom": "2021-11-20T06:48:59Z", + "validTo": "2122-02-04T06:48:59Z", + "version": 3 + } + } + ], + "createTime": "2021-12-01T19:00:27.516Z", + "links": [], + "network": [ + { + "ip": "192.168.56.3", + "port": 443 + } + ], + "serverApplication": "nginx", + "updateTime": "2021-12-01T19:00:27.516Z" + }, + { + "certs": [], + "createTime": "2021-12-02T00:27:34.507Z", + "cves": [ + { + "advisory": "http://mailman.nginx.org/pipermail/nginx-announce/2021/000300.html", + "description": "1-byte memory overwrite in resolver", + "id": "2021-23017", + "severity": "medium" + } + ], + "links": [], + "network": [ + { + "ip": "192.168.56.3", + "port": 80 + } + ], + 
"serverApplication": "nginx", + "updateTime": "2021-12-02T00:27:34.507Z", + "vendor": "Ubuntu", + "version": "1.18.0" + } + ] +} +``` + +
    + +--- + +## Troubleshooting + +### Scan reports NGINX versions as "undefined" when NGINX App Protect is enabled + +#### Description + +- When [scanning for NGINX instances]({{< relref "/nim/nginx-instances/scan-instances.md" >}}), the NGINX version is reported as `undefined` when NGINX App Protect is installed. + +#### Resolution + +- This behavior is **by design**. As a security precaution when NGINX App Protect is installed, the NGINX server does not report its version in any HTTP headers. The **NGINX Plus** and **Instances** pages in the web interface will continue to report the NGINX and NGINX App Protect versions. diff --git a/content/nim/reference/_index.md b/content/nim/reference/_index.md new file mode 100644 index 000000000..ebefe9a4f --- /dev/null +++ b/content/nim/reference/_index.md @@ -0,0 +1,7 @@ +--- +description: "This section includes the reference documentation for the Instance Manager." +title: Reference +weight: 750 +url: /nginx-instance-manager/reference/ +draft: true +--- diff --git a/content/nim/reference/api/_index.md b/content/nim/reference/api/_index.md new file mode 100644 index 000000000..595d156c8 --- /dev/null +++ b/content/nim/reference/api/_index.md @@ -0,0 +1,6 @@ +--- +title: API +description: API docs +draft: true +url: /nginx-instance-manager/reference/api/ +--- \ No newline at end of file diff --git a/content/nim/reference/api/api.md b/content/nim/reference/api/api.md new file mode 100644 index 000000000..b0339b951 --- /dev/null +++ b/content/nim/reference/api/api.md @@ -0,0 +1,9 @@ +--- +description: Instance Manager API Reference +docs: DOCS-809 +draft: true +title: API Reference +toc: true +weight: 300 +--- + diff --git a/content/nim/reference/cli/_index.md b/content/nim/reference/cli/_index.md new file mode 100644 index 000000000..e25f5b764 --- /dev/null +++ b/content/nim/reference/cli/_index.md @@ -0,0 +1,6 @@ +--- +title: CLI Reference +description: Command-line reference docs +draft: true +url: 
/nginx-instance-manager/reference/cli/
+---
\ No newline at end of file
diff --git a/content/nim/reference/cli/agent-cli.md b/content/nim/reference/cli/agent-cli.md
new file mode 100644
index 000000000..438f8fb69
--- /dev/null
+++ b/content/nim/reference/cli/agent-cli.md
@@ -0,0 +1,53 @@
+---
+description: Man page and instructions for using the NGINX Agent CLI
+docs: DOCS-814
+draft: true
+title: Agent CLI Reference
+toc: true
+weight: 300
+---
+
+
+
+{{< shortversions "2.0.0" "latest" "nimvers" >}}
+
+{{%heading "overview"%}}
+
+This document is intended to help people run NGINX Agent on the command line. The agent runs as a Linux daemon and sends information to the Instance Manager server over gRPC, polling every second.
+
+## Prerequisites {#prerequisites}
+
+Install NGINX Agent and know where the binary location is. By default the packager installs nginx-agent to /usr/sbin/nginx-agent.
+
+## Usage {#usage}
+
+`nginx-agent` is the CLI binary for NGINX agent.
+
+```bash
+Usage:
+  nginx-agent [flags]
+
+Flags:
+      --api-token string                set token to auth to commander & metrics service
+      --config-dirs string              set comma-separated list of allowed config directories (default "/etc/nginx")
+  -h, --help                            help for nginx-agent
+      --log-level string                set log level (panic, fatal, error, info, debug, trace, info) (default "info")
+      --log-path string                 set log path. If empty, logs only to stdout/stderr instead (default "/var/log/nginx-agent")
+      --metadata stringToString         set metadata for the specific instance/machine. Each entry is a key/value pair separated by an equals sign.
(default []) + --metrics-mode string set type of nginx metrics collected (nim, controller) (default "nim") + --metrics-server string set gRPC port of the metrics server to connect to + --nginx-bin-path string set path to the NGINX Binary + --nginx-exclude-logs string set comma-separated list of NGINX access log paths to exclude from metrics + --nginx-metrics-poll-interval duration set metrics poll interval (default 1s) + --nginx-pid-path string set path to the NGINX PID file + --nginx-plus-api string set NGINX plus status api URL (see nginx.org/r/api) + --nginx-stub-status string set NGINX stub status URL (see: nginx.org/r/stub_status) + --server string set gRPC port of the server to connect to (default "localhost:10000") + --tags strings set comma-separated list of tags for this specific instance / machine for inventory purposes + --tls-ca string set path to CA certificate file + --tls-cert string set path to certificate file + --tls-enable set to True for grpcs or False for offloading grpc without encrypting grpcs on nginx-manager. Omit for no encryption. + --tls-key string set path to the certificate key file + -v, --version version for nginx-agent +``` diff --git a/content/nim/reference/cli/server-cli.md b/content/nim/reference/cli/server-cli.md new file mode 100644 index 000000000..4e0bb0961 --- /dev/null +++ b/content/nim/reference/cli/server-cli.md @@ -0,0 +1,49 @@ +--- +description: Man page and instructions for using the Instance Manager CLI +docs: DOCS-815 +draft: true +title: Server CLI Reference +toc: true +weight: 300 +--- + + + + +{{< shortversions "2.0.0" "latest" "nimvers" >}} + +{{%heading "overview"%}} + +This document is intended to help people run Instance Manager on the command line. + +## Prerequisites {#prerequisites} + +Install Instance Manager and know where the binary location is. By default the packager installs nginx-manager to /usr/sbin/nginx-manager. + +## Usage {#usage} + +`nginx-manager` is the CLI binary for Instance Manager. 
+ +```bash +Usage: + nginx-manager [flags] + +Flags: + --audit-log string set API access log path + --auth enable auth checks on server + --bind-address string set the bind address for all service ports (default "localhost") + --cert string Path to x.509 certificate file + --gateway-port string set gRPC-gateway service port for API and UI (default "11000") + --grpc-port string set gRPC service port for agent communication (default "10000") + -h, --help help for nginx-manager + --key string set path to x.509 certificate key file + --license string set path to the license file + --log-level string set log level (panic, fatal, error, info, debug, trace, info) (default "info") + --log-path string set log path and if empty log only to stdout/stderr (default "/var/log/nginx-manager/") + --login enable temp login page + --metrics-storage-path string set storage path on disk for metrics (default "/tmp/metrics") + --rbac enable Role-Based Access Control + --server-name string set the bind address for all service ports + --skip-validation disable NGINX config validation in editor + -v, --version version for nginx-manager +``` diff --git a/content/nim/releases/_index.md b/content/nim/releases/_index.md new file mode 100644 index 000000000..fc02e0688 --- /dev/null +++ b/content/nim/releases/_index.md @@ -0,0 +1,7 @@ +--- +title: Releases +description: "Stay up-to-date with the latest [Instance Manager](https://www.nginx.com/products/nginx-instance-manager/) release." +weight: 1000 +url: /nginx-instance-manager/releases/ +--- + diff --git a/content/nim/releases/known-issues.md b/content/nim/releases/known-issues.md new file mode 100644 index 000000000..40b6d2007 --- /dev/null +++ b/content/nim/releases/known-issues.md @@ -0,0 +1,1842 @@ +--- +description: This document lists and describes the known issues and possible workarounds + in F5 NGINX Management Suite Instance Manager. Fixed issues are removed after **45 + days**. 
+docs: DOCS-937 +tags: +- docs +title: Known Issues +toc: true +weight: 200 +--- + +{{}} + +{{< tip >}}We recommend you upgrade to the latest version of Instance Manager to take advantage of new features, improvements, and bug fixes.{{< /tip >}} + + +--- + +## 2.17.0 +July 10, 2024 + +### {{% icon-resolved %}} Mismatch in date formats in custom date selection on NGINX usage graph {#45512} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 45512 | Fixed in Instance Manager 2.18.0 | + +{{}} + +#### Description +The months in the custom date range were not displayed correctly because NGINX Instance Manager assumed the data format was in the US timezone. + +--- + +### {{% icon-resolved %}} Failure to notify user when template configuration publish fails {#44975} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44975 | Fixed in Instance Manager 2.18.0 | + +{{}} + +#### Description +When publishing a configuration template fails, the system only displays "Accepted" without providing the final result, such as "Success" or "Failure." + +--- + +### {{% icon-bug %}} NGINX Agent 2.36.0 fails to validate certain NGINX configurations in F5 NGINX Instance Manager 2.17.0 {#45153} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 45153 | Open | + +{{}} +#### Description +In NGINX Instance Manager 2.17.0, an "invalid number of arguments" error appears in the web interface when using specific configuration parameters in NGINX Agent 2.36.0. + +#### Workaround + +Install NGINX Agent **2.35.1** if you're using NGINX Instance Manager 2.17.0. This version is included with NGINX Instance Manager 2.17.0 by default. + +If you're installing NGINX Agent from package files, follow the steps in the [Installing NGINX Agent](https://github.com/nginx/agent?tab=readme-ov-file#installing-nginx-agent-from-package-files) guide. 
+ +--- + +### {{% icon-bug %}} Web Analytics are not enabled after upgrading Instance Manager when keeping existing nms-http.conf {#45131} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 45131 | Open | + +{{}} +#### Description +When using NGINX Instance Manager, you configure OIDC by manually editing the /etc/nginx/conf.d/nms-http.conf and /etc/nms/nms.conf files. + +During the upgrade to 2.17.0, the user is asked if they would like to keep their own nms-http.conf, or replace it with the new default. As Web Analytics are enabled via the /etc/nginx/conf.d/nms-http.conf file, if a user decides to keep their own config when prompted during upgrade, these will not get enabled. + +#### Workaround + +To keep the existing nms-http.conf file while maintaining the web analytics functionality, add the following to "/etc/nginx/conf.d/nms-http.conf" , inside the `/ui` location block: + +```text +add_header Content-Security-Policy "default-src 'none'; block-all-mixed-content; frame-ancestors 'self'; object-src 'none'; manifest-src 'self'; script-src 'self' https://*.walkme.com 'unsafe-inline' 'unsafe-eval'; style-src 'self' https://*.walkme.com fonts.googleapis.com 'unsafe-inline'; img-src 'self' https://*.walkme.com s3.walkmeusercontent.com d3sbxpiag177w8.cloudfront.net data:; font-src 'self' https://*.walkme.com data: https://fonts.gstatic.com; connect-src 'self' https://*.walkme.com; frame-src 'self' https://*.walkme.com blob:; worker-src 'self' blob: https://*.walkme.com;"; +``` + +--- + +### {{% icon-bug %}} Failure to retrieve instance configuration when NAP-enabled instance doesn't register properly {#45113} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 45113 | Open | + +{{}} +#### Description +If NGINX Agent is configured to monitor NGINX App Protect before App Protect is installed, NGINX Agent will send an empty App Protect metadata structure to NGINX Instance Manager. 
This causes Instance Manager to fail to register the NGINX instance properly. + +#### Workaround + +Edit the "/etc/nginx-agent/nginx-agent.conf" file and configure "precompiled_publication" as "false". Then restart the nginx-agent process running `sudo systemctl restart nginx-agent`. + +--- + +### {{% icon-resolved %}} Editing template submissions now allows for using most recent template version {#44971} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44971 | Fixed in Instance Manager 2.17.0 | + +{{}} +#### Description +When editing a template submission, you can now choose between using a snapshot of the template from when it was first deployed or the latest version of the template. **Important:** Note that if you use the latest version, changes to the templates might make an augment template incompatible with a base template, causing the publication to the data plane to fail. + +--- + + +## 2.16.0 +April 18, 2024 + +### {{% icon-resolved %}} Editing template submissions uses the latest versions, may cause "malformed" errors {#44961} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44961 | Fixed in Instance Manager 2.17.0 | + +{{}} +#### Description +When editing a template submission, the system currently uses the latest template files instead of the specific snapshot of files associated with the submission. The latest template files might not be well-formed and can cause errors when generating a configuration. This can lead to an error message saying "malformed." + +#### Workaround + +Use caution when editing template submissions. If you encounter a "malformed" error, check the template for any changes that could have caused the issue. + +To keep a template from being changed accidentally, set it to "Ready for Use" by doing the following: + +1. Go to **Templates**. +2. Find the template you want to lock and click the **Actions** button (three dots). +3. Select **Edit**. +4. Select the **Ready for Use** option. 
+ +If you need to modify a template that you have already submitted, create a copy instead of editing the original: + +1. On the **Templates** page, locate the template you want to edit. +2. Select the **Actions** button and choose **Edit Template Files**. +3. Select **Save As** to duplicate the template, then give it a name. + +--- + +### {{% icon-resolved %}} REST API does not work until you log into the web interface first {#44877} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44877 | Fixed in Instance Manager 2.17.0 | + +{{}} +#### Description +If you get an "Error accessing resource: forbidden" message while using the NGINX Instance Manager REST API, try logging into the web interface. After logging in, you should be able to use the API. + +--- + + +## 2.15.0 +December 12, 2023 + +### {{% icon-resolved %}} Unable to use NMS Predefined Log Profiles for NAP 4.7 {#44759} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44759 | Fixed in Instance Manager 2.15.1 | + +{{}} +#### Description +The predefined NGINX Management Suite Log Profiles are incompatible with NGINX App Protect 4.7. + +#### Workaround + +To use the NGINX Management Suite predefined log profiles with NGINX App Protect 4.7 follow these steps: + +1. Retrieve the content of the NMS predefined log profile through the NMS Log Profile APIs, accessible in the (Manage WAF Security Policies and Security Log Profiles) section. +1. Decode the content obtained in the previous step using base64 encoding. +1. Modify the "max_request_size" and "max_message_size" values within the decoded content to the following: + + **"max_request_size": "2k", "max_message_size": "32k"** + +1. Create a custom log profile using the NMS Log Profile APIs, incorporating the base64 encoded content from the adjusted configuration. +1. Update your NGINX configuration to reference the new custom log profile in the NGINX App Protect log profile directive. 
+ +--- + +### {{% icon-resolved %}} Helm chart backup and restore is broken in NIM 2.15.0 {#44758} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44758 | Fixed in Instance Manager 2.15.1 | + +{{}} +#### Description +Helm backup and restore will not run in 2.15.0 due to an underlying change in the dqlite client. Customers are advised to upgrade to 2.15.1. + +#### Workaround + +Upgrade to NGINX Instance Manager 2.15.1. + +--- + +### {{% icon-bug %}} Some NGINX Management Suite features not available after adding license {#44698} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44698 | Open | + +{{}} +#### Description +After adding a license, some NGINX Management Suite features might be disabled, even if they are included in the license. + +#### Workaround + +Restart NGINX Management Suite to make all the features available for use. To restart NGINX Management Suite, open a terminal on the host and run the command: + +```shell +sudo systemctl restart nms +``` + +--- + +### {{% icon-resolved %}} Users receive login error when NGINX Management Suite is deployed in Kubernetes {#44686} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44686 | Fixed in Instance Manager 2.17.0 | + +{{}} +#### Description +After deploying NGINX Management Suite in a Kubernetes environment, when a user tries to log on for the first time, a generic error is displayed. + +#### Workaround + +Refreshing the browser clears the error and allows the user to log on. + +--- + +### {{% icon-bug %}} Licenses for NGINX Plus applied prior to Instance Manager 2.15 don't show the full feature set {#44685} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44685 | Open | + +{{}} +#### Description +With the introduction of Instance Manager 2.15, we are expanding the features available for some licenses, such as those with only NGINX Plus entitlement. 
If such a license was applied before upgrading to 2.15, the expanded set of features will not be available as intended. + +#### Workaround + +Terminate the license applied previously. Re-apply the license. + +--- + + +## 2.14.0 +October 16, 2023 + +### {{% icon-resolved %}} NGINX App Protect Attack Signature, Threat Campaign and Compiler fail to download {#44603} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44603 | Fixed in Instance Manager 2.15.0 | + +{{}} +#### Description +NGINX App Protect Attack Signatures package, Threat Campaigns package, and WAF Compiler can fail to download automatically with an error similar to the following: + +```none +Oct 20 22:22:57 ip-127-0-0-1 [9553]: 2023-10-20T22:22:57.648Z ERROR 81c818dd-ffff-aaaa-8b9d-134a60020d20 authz/authz.go:245 failed to get license status: getting license status: Get "http://unix-socket/api/platform/v1/license/status": context deadline exceeded +Oct 20 22:22:57 ip-127-0-0-1 [9527]: 2023-10-20T22:22:57.653Z ERROR nms-integrations compiler-controller/security_updates_downloader.go:94 security_updates_downloader: error when creating the nginx repo retriever - unexpected status when retrieving certs: 500 Internal Server Error +``` + +#### Workaround + +Download manually the latest [Attack Signatures package, Threat Campaign package]({{< relref "/nim/nginx-app-protect/setup-waf-config-management.md#manually-update-packages" >}}), and [WAF Compiler]({{< relref "/nim/nginx-app-protect/setup-waf-config-management.md#install-the-waf-compiler" >}}). + +--- + +### {{% icon-resolved %}} Missing Data when ClickHouse services are not running {#44586} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44586 | Fixed in Instance Manager 2.15.0 | + +{{}} +#### Description +The ClickHouse database service is a required component of the Instance Manager Dashboard. The dashboard may display an error message if the ClickHouse service does not start or quits unexpectedly. 
+ +#### Workaround + +Restart the ClickHouse service. + +--- + +### {{% icon-bug %}} Scan results may not include CVE count with App Protect installed {#44554} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44554 | Open | + +{{}} +#### Description +When using the Scan feature, the CVE column may provide a value of '--' for instances running specific versions of NGINX App Protect, including App Protect 4.4 and potentially others. + +--- + +### {{% icon-bug %}} Certain instances not showing in the Network Utilization drawer {#44547} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44547 | Open | + +{{}} +#### Description +Under certain conditions, instances that are not reporting request totals may not show in the Network Utilization panel or drawer when data is sorted by Request count. This typically happens when NGINX is not configured to stream metrics data to NGINX Agent. + +#### Workaround + +Configure NGINX Plus or NGINX Stub Status APIs to correctly send the NGINX metrics using NGINX Agent. See the [Metrics]({{< relref "/nim/monitoring/overview-metrics.md" >}}) documentation to learn more. + +--- + +### {{% icon-bug %}} Issues sorting HTTP errors in the dashboard {#44536} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44536 | Open | + +{{}} +#### Description +Sorting HTTP errors by “Request Count” sometimes shows the data in an incorrect order. + +--- + +### {{% icon-bug %}} NGINX Agent does not report NGINX App Protect status {#44531} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44531 | Open | + +{{}} +#### Description +NGINX Agent does not report NGINX App Protect as "Active" when the Attack Signature or Threat Campaign version is newer than 2023.10.01. + +#### Workaround + +[Upgrade NGINX Agent](https://docs.nginx.com/nginx-agent/installation-upgrade) to version v2.30.1 or later. 
+ +--- + +### {{% icon-bug %}} Built-in security policies may not be accessible {#44520} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44520 | Open | + +{{}} +#### Description +Users might not have permission to access the built-in policies (NginxDefaultPolicy and NginxStrictPolicy) while using NGINX Management Suite. + +#### Workaround + +Use RBAC to assign the following permissions to the user: +- (At minimum) READ access to any other custom security policy +or +- READ access to the security policy feature: `/api/platform/v1/security/policies` + +--- + +### {{% icon-resolved %}} Data on the dashboard is updating unexpectedly {#44504} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44504 | Fixed in Instance Manager 2.15.0 | + +{{}} +#### Description +Dashboard data may update unexpectedly when opening a drawer view. The updated data accurately represents the latest available information about your NGINX instances. + +--- + +### {{% icon-resolved %}} Instances reporting incorrect memory utilization {#44351} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44351 | Fixed in Instance Manager 2.15.0 | + +{{}} +#### Description +An upgrade to NGINX Agent v2.30 or later is required for instances to stream memory utilization data correctly. Note that even after the upgrade, historical data recorded before the upgrade will not be correct. + +#### Workaround + +[Upgrade NGINX Agent](https://docs.nginx.com/nginx-agent/installation-upgrade) to version v2.30 or later. + +--- + + +## 2.13.1 +September 05, 2023 + +### {{% icon-bug %}} Certificates may not appear in resource group {#44323} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44323 | Open | + +{{}} +#### Description +If you have certificates that were added to NGINX Management Suite before upgrading, they may not appear in the list of available certs when creating or editing a resource group. 
+ +#### Workaround + +Restarting the DPM process will make all certificates visible in the Resource Group web interface and API. + +For VM and bare metal deployments: +```shell +sudo systemctl restart nms-dpm +``` + +For Kubernetes deployments: + +```shell +kubectl -n nms scale --replicas=0 deployment.apps/dpm +kubectl -n nms scale --replicas=1 deployment.apps/dpm +``` + +--- + + +## 2.13.0 +August 28, 2023 + +### {{% icon-resolved %}} Access levels cannot be assigned to certain RBAC features {#44277} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44277 | Fixed in Instance Manager 2.13.1 | + +{{}} +#### Description +When configuring role-based access control (RBAC), you can't assign access levels to some features, including Analytics and Security Policies. + +--- + +### {{% icon-bug %}} If you publish a configuration with an uncompiled policy, it will fail the first time {#44267} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44267 | Open | + +{{}} +#### Description +In Instance Manager 2.13, a new configuration is published before the compile stage of a WAF policy is complete. This happens only when the policy is first referenced. This leads to a deployment failure, and the configuration rolls back. Typically, by the time you try to submit the configuration again, the policy has finished compiling, and the request goes through. + +The initial failure message looks like this: + +```text +Config push failed - err: failure from multiple instances. 
Affected placements: instance/70328a2c-699d-3a90-8548-b8fcec15dabd (instance-group: ig1) - err: failed building config payload: config: aux payload /etc/nms/NginxDefaultPolicy.tgz for instance:70328a2c-699d-3a90-8548-b8fcec15dabd not ready aux payload not ready, instance/2e637e08-64b3-36f9-8f47-b64517805e98 (instance-group: ig1) - err: failed building config payload: config: aux payload /etc/nms/NginxDefaultPolicy.tgz for instance:2e637e08-64b3-36f9-8f47-b64517805e98 not ready aux payload not ready +``` + +#### Workaround + +Retry pushing the new configuration. The deployment should work the second time around. + +--- + +### {{% icon-resolved %}} Validation errors in Resource Groups for certificates uploaded before 2.13 upgrade {#44254} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44254 | Fixed in Instance Manager 2.13.1 | + +{{}} +#### Description +If you upgrade to Instance Manager 2.13 and already have certificates in place, you may encounter validation errors in the web interface when you try to create or edit a Resource Group and access the Certs list. You will not be able to save the Resource Group if you encounter these errors. + +This issue doesn't occur if you upload certificates _after_ upgrading to version 2.13, nor does it affect new 2.13 installations. Instance Groups and Systems are unaffected. + +#### Workaround + +To work around this issue, you have two options: + +1. When creating or editing a Resource Group, don't use the Certs list. Instance Groups and Systems can still be used. +2. If you must use Resource Groups with Certs, delete any certificates that were uploaded before upgrading to 2.13, and then re-upload them. + +--- + +### {{% icon-bug %}} getAttackCountBySeverity endpoint broken with NGINX App Protect 4.4 and above {#44051} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44051 | Open | + +{{}} +#### Description +The reporting of severities has been disabled in NGINX App Protect 4.4. 
As a result, the `getAttackCountBySeverity` endpoint on the NGINX Management Suite's API will report zeroes for all severities, and the related "Severity" donut diagram in the Security Monitoring Dashboard won't display any values. + +--- + +### {{% icon-bug %}} Inaccurate Attack Signatures and Threat Campaigns versions {#43950} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 43950 | Open | + +{{}} +#### Description +If `precompiled_publication` is set to `true`, NGINX Management Suite may incorrectly report the version of Attack Signatures (AS) and Threat Campaigns (TC) that you previously installed on the NAP WAF instance. + +--- + + +## 2.12.0 +July 20, 2023 + +### {{% icon-bug %}} Licensing issues when adding JWT licenses in firewalled environments {#43719} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 43719 | Open | + +{{}} +#### Description +If firewall rules prevent access to F5 servers, attempting to license NGINX Management Suite with a JWT license may result in the product being unable to terminate the license or upload another one, even if connectivity is restored. + +#### Workaround + +To fix this issue, follow the steps below for your environment type. + +
    + +##### Virtual Machine or Bare Metal + +1. Stop the integrations service: + + ``` bash + sudo systemctl stop nms-integrations + ``` + +2. Delete the contents of `/var/lib/nms/dqlite/license` + +3. Start the integrations service: + + ```bash + sudo systemctl start nms-integrations + ``` + +4. Upload a valid S/MIME license. + + Alternatively, to use a JWT license, make sure to allow inbound and outbound access on port 443 to the following URLs: + + - https://product.apis.f5.com + - https://product-s.apis.f5.com/ee + +##### Kubernetes + +1. Run the following command to stop the integrations service by scaling down: + + ```bash + kubectl -n nms scale --replicas=0 deployment.apps/integrations + ``` +2. Access the Dqlite volume for the integrations service and delete the contents of `/var/lib/nms/dqlite/license`. + +3. Run the following command to start the integrations service by scaling up: + + ```bash + kubectl -n nms scale --replicas=1 deployment.apps/integrations + ``` + +4. Upload a valid S/MIME license. + + Alternatively, to use a JWT license, make sure to allow inbound and outbound access on port 443 to the following URLs: + + - https://product.apis.f5.com + - https://product-s.apis.f5.com/ee + +--- + +### {{% icon-bug %}} On Kubernetes, uploading a JWT license for NGINX Management Suite results in the error "secret not found" {#43655} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 43655 | Open | + +{{}} +#### Description +When uploading a JWT license to an NGINX Management Suite deployment on Kubernetes, you may see error messages in the web interface and logs similar to the following example: + +
    [ERROR] /usr/bin/nms-integrations   license/secrets.go:100    jwt-manager: failed to get [secret=dataEncryptionKey] from remote store. secret not found
    + +#### Workaround + +This error can be resolved by deleting the integrations pod and restarting it. You can do this by running the following command on the NGINX Management Suite host: + +```bash +kubectl -n nms scale --replicas=0 deployment.apps/integrations; kubectl -n nms scale --replicas=1 deployment.apps/integrations +``` + +--- + +### {{% icon-bug %}} Upgrading to 2.12 disables telemetry {#43606} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 43606 | Open | + +{{}} +#### Description +Upgrading to Instance Manager 2.12 will stop NGINX Management Suite from transmitting telemetry. + +#### Workaround + +Toggle the telemetry setting off and on. You can do this by selecting **Settings > License** from the NGINX Management Suite web interface. + +--- + +### {{% icon-bug %}} A JWT license for an expired subscription cannot be terminated from the web interface {#43580} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 43580 | Open | + +{{}} +#### Description +When a JWT license from an expired subscription is uploaded to NGINX Management Suite, it cannot be replaced or terminated from the web interface. + +#### Workaround + +Upload a valid JWT or S/MIME license file using the Platform API. + +More information is available in the Platform API reference guide, under the License endpoint. In a web browser, go to the FQDN for your NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Documentation**. + +--- + +### {{% icon-resolved %}} An "unregistered clickhouse-adapter" failure is logged every few seconds if logging is set to debug. {#43438} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 43438 | Fixed in Instance Manager 2.13.0 | + +{{}} +#### Description +If NGINX Management Suite logging is set to debug, it may log an "unregistered clickhouse-adapter" failure every few seconds. These logs do not affect the system's performance and can safely be ignored. 
+ +#### Workaround + +Choose a less verbose logging level, such as warning, error, or info. + +--- + + +## 2.11.0 +June 12, 2023 + +### {{% icon-bug %}} Querying API endpoints for Security deployments associations may return empty UIDs for Attack-Signatures and Threat-Campaigns {#43034} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 43034 | Open | + +{{}} +#### Description +When querying the following API endpoints for Security deployment associations, you may encounter results where the UID value for Attack-Signatures and Threat-Campaigns is empty. + +- /api/platform/v1/security/deployments/attack-signatures/associations +- /api/platform/v1/security/deployments/threat-campaigns/associations +- /api/platform/v1/security/deployments/associations/NginxDefaultPolicy + +#### Workaround + +To obtain the UID value for Attack-Signatures and Threat-Campaigns, you can query the following API endpoints: + +- /api/platform/v1/security/attack-signatures +- /api/platform/v1/security/threat-campaigns + +--- + +### {{% icon-bug %}} Publication status of instance groups may be shown as 'not available' after restarting NGINX Management Suite {#43016} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 43016 | Open | + +{{}} +#### Description +After restarting the NGINX Management Suite services, the publication status of instance groups for deployments that include a security policy may show as "not available". + +#### Workaround + +Redeploy a new version of the security policy or an updated `nginx.conf`. 
+ +--- + +### {{% icon-bug %}} When adding a Certs RBAC permission, the "Applies to" field may display as "nginx-repo" {#43012} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 43012 | Open | + +{{}} +#### Description +In certain situations, when you update a certificate or key using the NGINX Management Suite web interface, and subsequently add or edit a Certificate permission for Role-Based Access Control (RBAC) in **Settings > Roles**, you may notice that the "Applies to" name appears as "nginx-repo". + +#### Workaround + +Use the unique identifier to assign specific permissions to a particular certificate and key pair. + +--- + +### {{% icon-bug %}} Agent 2.26 has issues when deployed in RHEL9 with SELinux {#43010} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 43010 | Open | + +{{}} +#### Description +NGINX Agent 2.26, which is packaged with Instance Manager 2.11, may fail to start on RHEL 9 systems with SELinux enabled. An error similar to the following is logged: "Unable to read dynamic config". + +#### Workaround + +Use an earlier version of the NGINX Agent. You can install the NGINX Agent from [GitHub](https://github.com/nginx/agent) or the [NGINX Plus repository]({{< relref "/nginx/admin-guide/installing-nginx/installing-nginx-plus.md" >}}). 
+ +--- + +### {{% icon-bug %}} Error: "Failed to create secret" when reinstalling or upgrading NGINX Management Suite in Kubernetes {#42967} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 42967 | Open | + +{{}} +#### Description +When deploying NGINX Management Suite in Kubernetes, if you have previously run the support package script and the output is still in the default location, you may encounter an error message similar to the following example when reinstalling or upgrading NGINX Management Suite: + +`Failed to create: Secret "sh.helm.release.v1.(release-name).v1"` + +#### Workaround + +Delete or move the support package output files: `nms-hybrid/support-package/k8s-support-pkg-*.tgz`. + +--- + +### {{% icon-bug %}} Updating Attack Signatures or Threat Campaigns on multiple instances simultaneously updates only one instance {#42838} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 42838 | Open | + +{{}} +#### Description +When updating Attack Signatures or Threat Campaign packages on multiple instances simultaneously, only one instance may be successfully updated. An error similar to the following is logged: `security policy bundle object with given ID was not found.` + +#### Workaround + +Update the Attack Signatures or Threat Campaigns package one instance at a time. + +--- + + +## 2.10.0 +April 26, 2023 + +### {{% icon-resolved %}} Disk Usage in Metrics Summary shows incorrect data when multiple partitions exist on a system {#42999} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 42999 | Fixed in Instance Manager 2.12.0 | + +{{}} +#### Description +The Disk Usage metric on the Metrics Summary page averages disk usage across all the partitions instead of summing it. 
+ +--- + +### {{% icon-resolved %}} Unable to publish configurations referencing the log bundle for Security Monitor {#42932} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 42932 | Fixed in Instance Manager 2.12.0 | + +{{}} +#### Description +Configuration deployments that reference the log bundle for Security Monitoring (app_protect_security_log "/etc/nms/secops_dashboard.tgz" syslog:server=127.0.0.1:514;), may fail with an error message similar to the following: + +```none +: error while retrieving Nginx App Protect profile bundle secops_dashboard info for NAP version 4.279.0: Not Found. Please create it first +``` + +#### Workaround + +On the NGINX Management Suite host, restart platform services: + +```bash +sudo systemctl restart nms +``` + +--- + +### {{% icon-resolved %}} Valid licenses incorrectly identified as invalid {#42598} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 42598 | Fixed in Instance Manager 2.10.1 | + +{{}} +#### Description +Sometimes, valid licenses for NGINX Management Suite are incorrectly identified as invalid when uploaded. As a result, you may not be able to access features that require a valid license. + +--- + +### {{% icon-resolved %}} The Metrics module is interrupted during installation on Red Hat 9 {#42219} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 42219 | Fixed in Instance Manager 2.11.0 | + +{{}} +#### Description +When installing the Metrics module on Red Hat 9, the following error will prevent it from finishing: + +```none +warning: Signature not supported. Hash algorithm SHA1 not available. +error: /tmp/nginx_signing.key: key 1 import failed. + +Failed to import nginx signing key. exiting. +``` + +#### Workaround + +Before installation, run the following command: + +```bash +sudo update-crypto-policies --set DEFAULT:SHA1 +``` + +After installation, we recommend you return the default to a more secure algorithm such as SHA256. 
+ +--- + +### {{% icon-bug %}} When publishing a new version of Threat Campaign, the last two versions in the list cannot be selected {#42217} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 42217 | Open | + +{{}} +#### Description +The list of Threat Campaigns will disappear when scrolling down, preventing the selection of the oldest versions. + +#### Workaround + +Threat Campaign versions can be published with the API using the route: `api/platform/v1/security/publish` + +--- + +### {{% icon-resolved %}} Duplicate Certificate and Key published for managed certificates {#42182} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 42182 | Fixed in Instance Manager 2.11.0 | + +{{}} +#### Description +When deploying a configuration with a certificate and key handled by NGINX Management Suite to a custom file path, it may deploy a duplicate copy of the certificate and key to the default /etc/nginx/ path. When deleting the certificate and key, it will only delete the certificate and key in the custom path, leaving the duplicate copy. + +#### Workaround + +Manually delete the certificate and key from the /etc/nginx/ path. + +--- + +### {{% icon-bug %}} When upgrading to Instance Manager 2.10, there may be warnings from the Ingestion service {#42133} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 42133 | Open | + +{{}} +#### Description +When upgrading to 2.10 you may see a warning like the below message for the NGINX Management Suite Ingestion service. It can be safely ignored. + +```none +[WARN] #011/usr/bin/nms-ingestion #011start/start.go:497 #011error checking migrations Mismatched migration version for ClickHouse, expected 39 migrations to be applied, currently have only 44 migrations applied. 
+``` + +--- + +### {{% icon-bug %}} When upgrading to Instance Manager 2.10, the API does not return lastDeploymentDetails for existing configurations {#42119} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 42119 | Open | + +{{}} +#### Description +After upgrading to Instance Manager 2.10, the API does not return lastDeploymentDetails for existing configuration blocks. This is then reflected as "Invalid Date" in the UI (See #42108). + +#### Workaround + +Republish the configuration for the affected configuration blocks. + +--- + +### {{% icon-bug %}} When upgrading to Instance Manager 2.10, the publish status on App Security pages shows "Invalid Date" {#42108} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 42108 | Open | + +{{}} +#### Description +After upgrading to Instance Manager 2.10, the publish status on App Security pages of Policies, Attack Signatures, and Threat Campaign shows "Invalid Date" until new configurations are published to the instance or instance group. + +--- + +### {{% icon-resolved %}} Filtering Analytics data with values that have double backslashes (`\\`) causes failures {#42105} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 42105 | Fixed in Instance Manager 2.12.0 | + +{{}} +#### Description +When you apply a filter with double backslashes (`\\`) on any of the Analytics endpoints, such as metrics, events, or the security dashboard, the API fails to parse and apply the filter correctly. + +--- + +### {{% icon-bug %}} Configuration changes for NGINX Agent take longer than expected. {#41257} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 41257 | Open | + +{{}} +#### Description +NGINX Agent introduced the config_reload_monitoring_period parameter under nginx to define the duration which Agent will monitor the logs for relevant errors and warnings after a configuration change. As a result, configuration changes will take at least one second to appear. 
+ +#### Workaround + +Adjust the config_reload_monitoring_period parameter to a value that suits your workflow. + +--- + + +## 2.9.1 +April 06, 2023 + +### {{% icon-bug %}} OIDC-authenticated users can't view the Users list using the API or web interface {#43031} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 43031 | Open | + +{{}} +#### Description +When you use OIDC-based authentication in NGINX Management Suite, if the identity provider (IdP) sends an email address with an invalid format, users will be unable to access the list of Users through the web interface or API. + +#### Workaround + +To resolve this issue, please update the email addresses in your identity provider and ensure that all addresses are properly formatted. Once the email addresses are correctly formatted, users will be able to view the list of Users in the NGINX Management Suite. + +--- + + +## 2.9.0 +March 21, 2023 + +### {{% icon-resolved %}} NGINX configurations with special characters may not be editable from the web interface after upgrading Instance Manager {#41557} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 41557 | Fixed in Instance Manager 2.9.1 | + +{{}} +#### Description +After upgrading to Instance Manager 2.9.0, the system may display a "URI malformed" error if you use the web interface to edit a staged configuration or `nginx.conf` that contains special characters, such as underscores ("_"). + +--- + +### {{% icon-resolved %}} Installing NGINX Agent on FreeBSD fails with "error 2051: not implemented" {#41157} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 41157 | Fixed in Instance Manager 2.10.0 | + +{{}} +#### Description +Attempting to install NGINX Agent on FreeBSD fails with an error message: "error 2051: not implemented." 
+ +#### Workaround + +If you are using FreeBSD, you can download the NGINX Agent from [https://github.com/nginx/agent/releases/tag/v2.23.2]( https://github.com/nginx/agent/releases/tag/v2.23.2) or use a previously installed version. + +--- + + +## 2.8.0 +January 30, 2023 + +### {{% icon-resolved %}} Upgrading NGINX Management Suite may remove the OIDC configuration for the platform {#41328} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 41328 | Fixed in Instance Manager 2.9.0 | + +{{}} +#### Description +Upgrading the NGINX Management Suite could result in the removal of your OIDC configuration, which would prevent users from being able to log in through OIDC. + +#### Workaround + +Prior to upgrading, we recommend that you [back up your configuration files]({{< relref "/nim/admin-guide/maintenance/backup-and-recovery.md" >}}) and the platform proxy. + +--- + +### {{% icon-bug %}} Precompiled Publication setting is reverted to false after error publishing NGINX App Protect policy {#40484} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 40484 | Open | + +{{}} +#### Description +After enabling the `precompiled_publication` setting in the `nginx-agent.conf` file, you may encounter the following error when attempting to publish NGINX App Protect policies to an instance: + +```text +{"instance:6629a097-9d91-356a-bd70-de0ce846cf2b":"unsupported file type for Nginx App Protect. Please use Nginx App Protect JSON file"}. +``` + +If this happens, the Precompiled Publication setting will be reverted to false/blank on the instance's detail page in the NGINX Management Suite web interface. + +#### Workaround + +1. Log in to the instance you're trying to publish the NGINX App Protect policies to and check if directory **/etc/nms** exists: + If directory **/etc/nms** doesn't exist, please create it first. + ```bash + sudo mkdir /etc/nms + sudo chown root:nginx-agent /etc/nms + ``` +2. 
Change the **precompiled_publication** setting in nginx-agent.conf to **false** + ```bash + sudo vi /etc/nginx-agent/nginx-agent.conf + ``` +3. Restart nginx-agent + ```bash + sudo systemctl restart nginx-agent + ``` +4. Change the **precompiled_publication** setting in nginx-agent.conf to **true** + ```bash + sudo vi /etc/nginx-agent/nginx-agent.conf + ``` +5. Restart nginx-agent + ```bash + sudo systemctl restart nginx-agent + ``` +The instance on the NGINX Management Suite's Instance Details page should show **Precompiled Publication** as **enabled**. + +--- + +### {{% icon-bug %}} Automatic downloads of attack signatures and threat campaigns are not supported on CentOS 7, RHEL 7, or Amazon Linux 2 {#40396} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 40396 | Open | + +{{}} +#### Description +If you use CentOS 7, RHEL 7, or Amazon Linux 2 and you have configured auto-downloads for new Attack Signatures or Threat Campaigns in Instance Manager, you may encounter an error similar to the following example when attempting to publish an NGINX App Protect WAF policy: + +```json +{ + "error_message": "Data::MessagePack->unpack: parse error", + "completed_successfully": false, + "componentVersions": { + "wafEngineVersion": "10.179.0" + }, + "event": "configuration_load_failure" +} +``` + +#### Workaround + +This issue is related to [bug 39563](#39563) and has the same workaround. + +--- + +### {{% icon-resolved %}} App Protect: "Assign Policy and Signature Versions" webpage may not initially display newly added policies {#40085} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 40085 | Fixed in Instance Manager 2.9.0 | + +{{}} +#### Description +If you've published new policies by updating the `nginx.conf` file, using the Instance Manager REST API, or through the web interface, you may not see the policy when you initially select **Assign Policy and Signature Versions** on the Policy Detail page. 
+ +#### Workaround + +To fix this issue, return to the Policy Detail page and select **Assign Policy and Signature Versions** again. + +--- + +### {{% icon-bug %}} System reports "Attack Signature does not exist" when publishing default Attack Signature {#40020} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 40020 | Open | + +{{}} +#### Description +The default Attack Signature might be unavailable for publishing from Instance Manager, even though it is listed on the web interface. Attempting to publish this Attack Signature results in the error message "Error publishing the security content: attack signature does not exist." + +#### Workaround + +[Download another (latest recommended) version of the Attack Signature and publish it]({{< relref "/nim/nginx-app-protect/setup-waf-config-management.md" >}}). Attack Signature 2019.07.16 should be removed from the list when you refresh the web interface. + +--- + +### {{% icon-resolved %}} The Type text on the Instances overview page may be partially covered by the Hostname text {#39760} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 39760 | Fixed in Instance Manager 2.9.0 | + +{{}} +#### Description +On the Instances overview page, long hostnames may overlap and interfere with the visibility of the text in the Type column that displays the NGINX type and version. + +#### Workaround + +Select the hostname to open the instance details page to view the full information. 
+ +--- + + +## 2.7.0 +December 20, 2022 + +### {{% icon-resolved %}} SELinux errors encountered when starting NGINX Management Suite on RHEL9 with the SELinux policy installed {#41327} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 41327 | Fixed in Instance Manager 2.10.0 | + +{{}} +#### Description +On RHEL9 with the SELinux policy loaded, NGINX Management Suite may report the following errors when starting: + +``` text +ausearch -m AVC,USER_AVC,SELINUX_ERR,USER_SELINUX_ERR -ts recent + +type=AVC msg=audit(1678828847.528:6775): avc: denied { watch } for pid=53988 comm="nms-core" path="/var/lib/nms/modules" dev="nvme0n1p4" ino=50345930 scontext=system_u:system_r:nms_t:s0 tcontext=system_u:object_r:nms_var_lib_t:s0 tclass=dir permissive=0 +``` + +#### Workaround + +If you encounter any of the errors mentioned above, you can attempt to rebuild and reload the NGINX Management Suite policy. To do so, follow these steps: + +1. Copy the `nms.te` and `nms.fc` files to a directory on your target machine. + + - {{< fa "download" >}} {{< link "/nim/release-notes/41327/nms.te" "nms.te" >}} + - {{< fa "download" >}} {{< link "/nim/release-notes/41327/nms.fc" "nms.fc" >}} + +2. [Install the `policycoreutils-devel` package](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/using_selinux/index#selinux-architecture_getting-started-with-selinux). +3. Change to the directory where you copied the `nms.te` and `nms.fc` files. +4. Rebuild the `nms.pp` file: + + ```bash + make -f /usr/share/selinux/devel//Makefile nms.pp + ``` + +5. Remove any existing NGINX Management Suite policy: + + ```bash + sudo semodule -r nms + ``` + +6. Install the new policy: + + ```bash + sudo semodule -n -i nms.pp + ``` + +7. To finish installing the NGINX Management Suite policy, follow the remaining instructions from the package manager output and restart the NGINX Management Suite services: + + ```bash + sudo systemctl restart nms + ``` + +8. 
After 10 minutes, check there are no more SELinux errors: + + ```bash + sudo ausearch -m avc --raw -se nms -ts recent + ``` + +--- + +### {{% icon-resolved %}} "Public Key Not Available" error when upgrading Instance Manager on a Debian-based system {#39431} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 39431 | Fixed in Instance Manager 2.9.0 | + +{{}} +#### Description +When attempting to upgrade Instance Manager on a Debian-based system, the command `sudo apt-get update` may return the error “public key is not available,” preventing the NGINX Agent from being updated. To resolve this issue, you need to update the public key first. + +#### Workaround + +To manually update the public key, take the following steps: + +1. Download a new key from the NGINX Management Suite host: + + - secure + + ```shell + curl https:///packages-repository/nginx-signing.key | gpg --dearmor | sudo tee /usr/share/keyrings/nginx-signing.gpg >/dev/null + ``` + + - insecure: + + ```shell + curl -k https:///packages-repository/nginx-signing.key | gpg --dearmor | sudo tee /usr/share/keyrings/nginx-signing.gpg >/dev/null + ``` + +2. 
Update the `nginx-agent.list` file to reference the new key: + + ```shell + printf "deb [signed-by=/usr/share/keyrings/nginx-signing.gpg] https:///packages-repository/deb/ubuntu `lsb_release -cs` agent\n" | sudo tee /etc/apt/sources.list.d/nginx-agent.list + ``` + +--- + + +## 2.6.0 +November 17, 2022 + +### {{% icon-bug %}} "Unpack: parse error" when compiling security update packages on CentOS 7, RHEL 7, and Amazon Linux 2 {#39563} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 39563 | Open | + +{{}} +#### Description +If you are trying to publish an NGINX App Protect WAF policy after adding a new Attack Signature or Threat Campaign to Instance Manager, either through the `security/attack-signatures` or `security/threat-campaigns` API endpoints, or by enabling auto-downloads of signatures and threat campaigns, you may encounter an error similar to the following: + +```json +{ + "error_message": "Data::MessagePack->unpack: parse error", + "completed_successfully": false, + "componentVersions": { + "wafEngineVersion": "10.179.0" + }, + "event": "configuration_load_failure" +} +``` + +Example error output in `/var/log/nms`: + +```log +Feb 6 18:58:58 ip-172-16-0-23 : 2023-02-06T18:58:58.625Z#011[INFO] #011b5c8de8a-8243-4128-bc8f-5c02ea8df839+1675709938565522240#011compiler-controller/compiler.go:261#011starting compilation for compilation request identified by the fields - policy UID (19fa1ed0-c87d-4356-9ab0-d250c3b630f3), compiler version (4.2.0), attack signatures version (2022.10.27), threat campaigns version (2022.11.02), global state UID (d7b6b5b4-6aa6-4bd7-a3e2-bfaaf035dbe0) +Feb 6 18:58:58 ip-172-16-0-23 : 2023-02-06T18:58:58.625Z#011[DEBUG]#011b5c8de8a-8243-4128-bc8f-5c02ea8df839+1675709938565522240#011compiler-controller/compiler.go:294#011performing pre compilation +Feb 6 18:58:58 ip-172-16-0-23 : 
2023-02-06T18:58:58.625Z#011[DEBUG]#011b5c8de8a-8243-4128-bc8f-5c02ea8df839+1675709938565522240#011compiler-controller/compiler.go:588#011Updating attack signatures from 2019.07.16 to 2022.10.27 +Feb 6 18:58:58 ip-172-16-0-23 : 2023-02-06T18:58:58.643Z#011[DEBUG]#011b5c8de8a-8243-4128-bc8f-5c02ea8df839+1675709938565522240#011compiler-controller/compiler.go:487#011copying the files for attack signature 2022.10.27 +Feb 6 18:58:58 ip-172-16-0-23 : 2023-02-06T18:58:58.644Z#011[DEBUG]#011b5c8de8a-8243-4128-bc8f-5c02ea8df839+1675709938565522240#011compiler-controller/compiler.go:515#011successfully copied over attack signatures version 2022.10.27 to compiler 4.2.0 +Feb 6 18:58:58 ip-172-16-0-23 : 2023-02-06T18:58:58.644Z#011[INFO] #011b5c8de8a-8243-4128-bc8f-5c02ea8df839+1675709938565522240#011compiler-controller/compiler.go:639#011executing the following pre compilation command - /opt/nms-nap-compiler/app_protect-4.2.0/bin/config_set_compiler --update-signatures +Feb 6 18:59:02 ip-172-16-0-23 : 2023-02-06T18:59:02.750Z#011[INFO] #011b5c8de8a-8243-4128-bc8f-5c02ea8df839+1675709938565522240#011compiler-controller/compiler.go:642#011stdout and stderr produced from the pre compilation command '/opt/nms-nap-compiler/app_protect-4.2.0/bin/config_set_compiler --update-signatures': +Feb 6 18:59:02 ip-172-16-0-23 : --- stdout --- +Feb 6 18:59:02 ip-172-16-0-23 : {"error_message":"Data::MessagePack->unpack: parse error","completed_successfully":false,"componentVersions":{"wafEngineVersion":"10.179.0"},"event":"configuration_load_failure"} +Feb 6 18:59:02 ip-172-16-0-23 : --- stderr --- +``` + +#### Workaround + +Download the `attack-signatures` and/or `threat-campaigns` packages for CentOS 7, RHEL 7, or Amazon Linux 2 from the NGINX repo directly to your Instance Manager host by following the instructions in the official NGINX App Protect documentation: + +- [Attack Signatures 
Documentation](https://docs.nginx.com/nginx-app-protect/admin-guide/install/#centos--rhel-74--amazon-linux-2) +- [Threat Campaigns Documentation](https://docs.nginx.com/nginx-app-protect/admin-guide/install/#centos--rhel-74--amazon-linux-2-1) + +After downloading the `attack-signatures` and/or `threat-campaigns` packages onto your Instance Manager host, give Instance Manager about 15 seconds to recognize these packages. + +If the logging level is set to `debug`, you should see the following logs that confirm a successful installation: + +```log +Feb 6 20:35:17 ip-172-16-0-23 : 2023-02-06T20:35:17.174Z#011[DEBUG]#011nms-integrations #011compiler-controller/security_updates_monitor.go:256#011detected change in attack signature files [/opt/app_protect/var/update_files/signatures/signatures.bin.tgz /opt/app_protect/var/update_files/signatures/signature_update.yaml /opt/app_protect/var/update_files/signatures/version]... syncing +Feb 6 20:35:17 ip-172-16-0-23 : 2023-02-06T20:35:17.175Z#011[DEBUG]#011nms-integrations #011compiler-controller/security_updates_monitor.go:307#011downloading attack signatures version - 2023.01.26 +Feb 6 20:35:17 ip-172-16-0-23 : 2023-02-06T20:35:17.193Z#011[DEBUG]#011nms-integrations #011compiler-controller/security_updates_monitor.go:349#011successfully downloaded attack signatures version - 2023.01.26 +Feb 6 20:46:02 ip-172-16-0-23 : 2023-02-06T20:46:02.176Z#011[DEBUG]#011nms-integrations #011compiler-controller/security_updates_monitor.go:274#011detected change in threat campaign files [/opt/app_protect/var/update_files/threat_campaigns/threat_campaigns.bin.tgz /opt/app_protect/var/update_files/threat_campaigns/threat_campaign_update.yaml /opt/app_protect/var/update_files/threat_campaigns/version]... 
syncing +Feb 6 20:46:02 ip-172-16-0-23 : 2023-02-06T20:46:02.176Z#011[DEBUG]#011nms-integrations #011compiler-controller/security_updates_monitor.go:370#011downloading threat campaigns version - 2023.01.11 +Feb 6 20:46:02 ip-172-16-0-23 : 2023-02-06T20:46:02.191Z#011[DEBUG]#011nms-integrations #011compiler-controller/security_updates_monitor.go:412#011successfully downloaded threat campaigns version - 2023.01.11 +``` + +Once the `attack-signatures` and/or `threat-campaigns` packages have been added to the library, you can list them by making a `GET` request to the corresponding API endpoints. + +- attack signatures - `https://{nms-fqdn}/api/platform/v1/security/attack-signatures` +- threat campaigns - `https://{nms-fqdn}/api/platform/v1/security/threat-campaigns` + +--- + +### {{% icon-bug %}} App Protect Policies page fails when deployed via Helm chart {#38782} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 38782 | Open | + +{{}} +#### Description +When installing NGINX Instance Manager on Kubernetes via Helm Chart, the App Protect page shows an error banner, and no default policies are displayed. + +--- + +### {{% icon-bug %}} Config deployment could fail when referencing remote cert inside allowed directories {#38596} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 38596 | Open | + +{{}} +#### Description +Deploying NGINX config with references to remote cert that resides in allowed directories could fail, with the following error: +`BIO_new_file() failed (SSL: error:02001002:system library:fopen:No such file or directory`. + +This can also be diagnosed with log entries in `/var/log/nginx-agent/agent.log`, noting the removal of the referenced certificate. + +#### Workaround + +- Add the referenced cert to NMS as managed certificate and publish the config again. +- Move the referenced remote certificate to a directory that's not in the allowed directory list. 
+ +--- + +### {{% icon-bug %}} When upgrading a multi-node NMS deployment with helm charts the core, dpm, or integrations pods may fail to start {#38589} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 38589 | Open | + +{{}} +#### Description +When using the NMS Instance Manager Helm upgrade command on a multi worker node kubernetes cluster setup, the core, dpm and integrations deployments may fail to upgrade. + +#### Workaround + +Post upgrade, do the following steps: + +> kubectl -n nms scale --replicas=0 deployment.apps/dpm; kubectl -n nms scale --replicas=1 deployment.apps/dpm +> kubectl -n nms scale --replicas=0 deployment.apps/core; kubectl -n nms scale --replicas=1 deployment.apps/core +> kubectl -n nms scale --replicas=0 deployment.apps/integrations; kubectl -n nms scale --replicas=1 deployment.apps/integrations + +--- + +### {{% icon-bug %}} Unreferenced NGINX App Protect policy file in /etc/nms {#38488} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 38488 | Open | + +{{}} +#### Description +When using NGINX Instance Manager with App Protect policies, previously referenced policies in the NGINX configuration may not be removed after they are no longer referenced in the NGINX config. + +#### Workaround + +Unreferenced policy files may be removed manually from /etc/nms. + +--- + +### {{% icon-bug %}} HTTP version schema returns incorrect value in Advanced metrics module {#38041} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 38041 | Open | + +{{}} +#### Description +The values currently populated for http.version_schema are incorrect. The response is "4" for HTTP traffic and "6" for HTTPS traffic. 
+
+---
+
+### {{% icon-resolved %}} Count of NGINX Plus graph has a delay in being populated {#37705}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 37705 | Fixed in Instance Manager 2.11.0 |
+
+{{}}
+#### Description
+When viewing the NGINX Plus usage in Instance Manager, the graph displaying usage over time requires several hours of data before displaying the count.
+
+#### Workaround
+
+The data presented in the graph can be retrieved from the API.
+
+---
+
+### {{% icon-bug %}} External references are not supported in App Protect policies {#36265}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 36265 | Open |
+
+{{}}
+#### Description
+References to external files in a policy are not supported.
+
+For example, in the NGINX App Protect WAF JSON declarative policy, these references are not supported:
+- User-defined signatures
+- Security controls in external references
+- Referenced OpenAPI Spec files
+
+---
+
+
+## 2.5.0
+October 04, 2022
+
+### {{% icon-bug %}} Aux data fails to upload if the size is greater than 3145728 characters {#37498}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 37498 | Open |
+
+{{}}
+#### Description
+Updating a config with an aux data file exceeding 3145728 characters fails with a validation error similar to the following example:
+
+Request body has an error: doesn't match the schema: Error at "/auxFiles/files/3/contents": maximum string length is 3145728
+
+---
+
+### {{% icon-bug %}} Staged configs fail to publish after upgrading NGINX Management Suite {#37479}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 37479 | Open |
+
+{{}}
+#### Description
+After upgrading NGINX Management Suite to 2.5.0, when you try to publish a staged config from the web interface, the system returns an error similar to the following:
+
+> "The published configuration is older than the active instance configuration."
+
+#### Workaround
+
+Make a minor edit to a staged config, such as adding a space, then save the change. You should be able to publish now.
+
+---
+
+### {{% icon-bug %}} "Deployment Not Found" error when publishing NGINX config to NATS server {#37437}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 37437 | Open |
+
+{{}}
+#### Description
+Occasionally, when publishing an NGINX config to a NATS server, the system returns a `Deployment Not Found` error, and the `nms.log` file includes the error `http failure with code '131043': `.
+
+#### Workaround
+
+Remove the existing NATS working directory and restart the NMS Data Plane Manager (`nms-dpm`) service as root.
+
+{{}}Restarting the `nms-dpm` service is disruptive and may result in the loss of event data. You should schedule a maintenance window for restarting the service.{{}}
+
+```bash
+rm -rf /var/lib/nms/streaming
+systemctl restart nms-dpm
+```
+
+---
+
+
+## 2.3.0
+June 30, 2022
+
+### {{% icon-bug %}} Scan misidentifies some NGINX OSS instances as NGINX Plus {#35172}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 35172 | Open |
+
+{{}}
+#### Description
+When NGINX Plus is installed on a datapath instance, then removed and replaced with NGINX OSS, NGINX Instance Manager may incorrectly identify the instance as an NGINX Plus instance. This is due to multiple NGINX entries for the same datapath.
+
+#### Workaround
+
+Use NGINX Instance Manager's NGINX Instances API to remove the inactive NGINX instance. For instructions, refer to the API reference guide, which you can find at `https:///ui/docs`.
+
+You may need to stop the NGINX Agent first.
To stop the NGINX Agent, take the following steps: + +```bash +sudo systemctl stop nginx-agent +``` + +--- + +### {{% icon-bug %}} Metrics may report additional data {#34255} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 34255 | Open | + +{{}} +#### Description +NGINX Instance Manager reports metrics at a per-minute interval and includes dimensions for describing the metric data's characteristics. + +An issue has been identified in which metric data is aggregated across all dimensions, not just for existing metrics data. When querying the Metrics API with aggregations like `SUM(metric-name)`, the aggregated data causes the API to over count the metric. This overcounting skews some of the metrics dashboards. + +#### Workaround + +When querying the Metrics API, you can exclude the data for an aggregated dimension by specifying the dimension name in the `filterBy` query parameter. + +```none +filterBy=!= '' +``` + +--- + + +## 2.2.0 +May 25, 2022 + +### {{% icon-bug %}} Giving long names (255+ characters) to certificates causes internal error {#34185} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 34185 | Open | + +{{}} +#### Description +When adding certificates, an internal error (error code: 134018) is returned if the name given for the certificate exceeds 255 characters. + +#### Workaround + +Use a name that is 255 or fewer characters. + +--- + + +## 2.1.0 +April 05, 2022 + +### {{% icon-bug %}} An unexpected number of instances are shown after upgrading nginx-agent to 2.1.0 {#33307} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 33307 | Open | + +{{}} +#### Description +After upgrading to NGINX Instance Manager 2.1.0, and updating nginx-agent from platform packaging, duplicate instances may appear on the Instance overview page. This issue is caused by a change in how the NGINX Agent generates the `system_uid`. + +#### Workaround + +You can safely delete the older entries or wait for them to expire. 
+
+---
+
+### {{% icon-bug %}} “No such process” error occurs when publishing a configuration {#33160}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 33160 | Open |
+
+{{}}
+#### Description
+When publishing a configuration, you might encounter an error similar to the following example:
+
+``` text
+config action failed: Config apply failed (write): no such process
+```
+
+This error can occur when there is a desynchronization between the NGINX Agent and NGINX PID, often after manually restarting NGINX when the Agent is running.
+
+#### Workaround
+
+Restart the NGINX Agent:
+
+``` bash
+sudo systemctl restart nginx-agent
+```
+
+---
+
+
+## 2.0.0
+December 21, 2021
+
+### {{% icon-bug %}} NGINX App Protect WAF blocks NGINX Instance Manager from publishing configurations {#32718}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 32718 | Open |
+
+{{}}
+#### Description
+NGINX Instance Manager does not currently support managing NGINX App Protect WAF instances. NGINX App Protect WAF may block attempts to publish configurations to NGINX App Protect WAF instances.
+
+---
+
+### {{% icon-resolved %}} Instance Manager reports old NGINX version after upgrade {#31225}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 31225 | Fixed in Instance Manager 2.7.0 |
+
+{{}}
+#### Description
+ After upgrading NGINX to a new version, the NGINX Instance Manager web interface and API report the old NGINX version until the NGINX Agent is restarted.
+
+#### Workaround
+
+ Restart the Agent to have the new version reflected properly:
+
+ ```bash
+ systemctl restart nginx-agent
+ ```
+
+---
+
+### {{% icon-bug %}} Web interface doesn’t report error when failing to upload large config files {#31081}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 31081 | Open |
+
+{{}}
+#### Description
+In the web interface, when uploading a config file that's larger than 50 MB (max size), the system incorrectly reports the state as `Analyzing` (Status code `403`), although the upload failed.
+
+#### Workaround
+
+Keep config files under 50 MB.
+
+---
+
+### {{% icon-bug %}} CentOS 7, RHEL 7, and Amazon Linux 2 package managers allow unsupported NGINX/NGINX Plus versions {#28758}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 28758 | Open |
+
+{{}}
+#### Description
+When installing on CentOS 7, RHEL 7, and Amazon Linux 2, the package manager doesn't prevent installing NGINX Instance Manager with unsupported versions of NGINX or NGINX Plus. As a consequence, it is possible that `nms-instance-manager` is installed without an NGINX gateway, resulting in a less-than-optimal experience.
+
+#### Workaround
+
+Install a supported version of NGINX (v1.18 or later) or NGINX Plus (R22 or later). See the [Technical Specifications]({{< relref "/nim/fundamentals/tech-specs" >}}) guide for details.
+
+---
+
+### {{% icon-bug %}} gRPC errors occur when starting NGINX Instance Manager {#28683}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 28683 | Open |
+
+{{}}
+#### Description
+ When starting NGINX Instance Manager, you may see errors similar to the following in `/etc/nginx/conf.d/nms-http.conf:227`:
+
+ ```text
+ nginx[1234]: nginx: [emerg] unknown directive "grpc_socket_keepalive"
+ ```
+
+#### Workaround
+
+Make sure your version of NGINX is v1.18 or later.
diff --git a/content/nim/releases/release-notes.md b/content/nim/releases/release-notes.md new file mode 100644 index 000000000..3b91a208d --- /dev/null +++ b/content/nim/releases/release-notes.md @@ -0,0 +1,1508 @@ +--- +docs: DOCS-938 +title: Release Notes +toc: true +weight: 100 +--- + +The release notes for F5 NGINX Instance Manager highlight the latest features, improvements, and bug fixes in each release. This document helps you stay up to date with the changes and enhancements introduced to improve stability, performance, and usability. For each version, you’ll find details about new features, known issues, and resolved problems, ensuring you get the most out of your NGINX instance management experience. + +
    + Support for NGINX App Protect WAF + +{{< include "nim/tech-specs/nim-app-protect-support.md" >}} + +
    + +--- + +## 2.18.0 + +November 8, 2024 + +### Upgrade Paths {#2-18-0-upgrade-paths} + +NGINX Instance Manager supports upgrades from these previous versions: + +- 2.15.x - 2.17.x + +If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-18-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Adds support for NGINX App Protect WAF v5** + + NGINX Instance Manager 2.18.0 adds support for [NGINX App Protect WAF v5.3 and v4.11]({{< relref "nap-waf/v5/admin-guide/overview.md" >}}). + + NGINX App Protect WAF v5, designed for both NGINX Open Source and NGINX Plus environments, includes a dynamic NGINX module and containerized WAF services. It provides robust security and scalability. + +- {{% icon-feature %}} **Ridiculously easy NGINX Instance Manager installation script (Bash)** + + Reduce the number of steps to deploy all NGINX Instance Manager components, including prerequisites, using a single [installation script]({{< relref "nim/deploy/vm-bare-metal/install.md" >}}). The script supports every OS that NGINX Instance Manager supports in the [technical specifications]({{< relref "nim/fundamentals/tech-specs.md" >}}). + + The script installs NGINX (Plus or Open Source), ClickHouse, and NGINX Instance Manager. Customers only need their NGINX Plus certificate, key, and, for NGINX Plus R33 or later, a JWT downloaded from MyF5. Support for offline installations will be added in a future update. + + Support for [offline installations]({{< relref "nim/disconnected/offline-install-guide.md" >}}) is also available for air-gapped environments. 
+ +- {{% icon-feature %}} **Entitlement and visibility for NGINX Plus R33 – Telemetry reporting for network-restricted environments** + + If NGINX Instance Manager has internet access, customers can [automatically or manually send the usage data to F5]({{< relref "nim/admin-guide/license/report-usage-connected-deployment.md" >}}) as part of the new NGINX Plus R33 changes. + + For customers who have NGINX Instance Manager deployed in [network-restricted environments]({{< relref "nim/disconnected" >}}), this release also includes support for manual usage reporting. Customers can now manually license NGINX Instance Manager and export usage telemetry for fully disconnected environments. For usage reporting, customers can: + + - **Export the usage report**: Manually export the usage report from NGINX Instance Manager. + - **Send the report to F5**: Submit the report to F5 for verification from a location with internet access. + - **Upload the acknowledgment**: After verification, upload the acknowledgment from F5 to NGINX Instance Manager. + +- {{% icon-feature %}} **Resilient Docker Compose NGINX Instance Manager deployment** + + In 2.17, we released a [bundled container image]({{< relref "nim/deploy/docker/deploy-nginx-instance-manager-docker.md" >}}) with all NGINX Instance Manager components. While this is a great option for demos and lab environments, it is not the most fault-tolerant for production. + + This [Docker Compose option]({{< relref "nim/deploy/docker/deploy-nginx-instance-manager-docker-compose.md" >}}) unlocks another easy, production-ready installation method for customers using Docker. It will also make upgrades easier when new Docker images are released by F5 NGINX. This option includes health checking, NGINX App Protect compilation support, and security monitoring. 
+ +### Changes in Default Behavior{#2-18-0-changes-in-behavior} + +This release has the following changes in default behavior: + +- {{% icon-feature %}} **The NGINX Usage page now only shows instances configured with the NGINX Plus R33 mgmt block.** + + The “NGINX Usage” page previously displayed instances connected to NGINX Instance Manager through multiple methods, including the NGINX Agent, health checks, and the `mgmt` block in NGINX Plus R31-R32. With the introduction of native reporting in NGINX Plus R33, only instances using this feature appear on the page, preventing duplicates. For more information on R33 usage reporting, see [About subscription licenses]({{< relref "solutions/about-subscription-licenses.md" >}}). + +### Resolved Issues{#2-18-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Mismatch in date formats in custom date selection on NGINX usage graph [(45512)]({{< relref "/nim/releases/known-issues.md#45512" >}}) +- {{% icon-resolved %}} Failure to notify user when template configuration publish fails [(44975)]({{< relref "/nim/releases/known-issues.md#44975" >}}) + + +### Known Issues{#2-18-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +## 2.17.4 + +November 06, 2024 + +### Upgrade Paths {#2-17-4-upgrade-paths} + +NGINX Instance Manager supports upgrades from these previous versions: + +- 2.14.0 - 2.17.3 + +If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-17-4-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Stability and performance improvements** + + This release includes stability and performance improvements. 
+ + +### Known Issues{#2-17-4-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.17.3 + +September 13, 2024 + +### Upgrade Paths {#2-17-3-upgrade-paths} + +NGINX Instance Manager supports upgrades from these previous versions: + +- 2.14.0 - 2.17.2 + +If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-17-3-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **GPG key update for NGINX Agent packages** + + Previous releases of NGINX Instance Manager included NGINX Agent packages signed with an expired GPG key. This release of NGINX Instance Manager includes updated keys, allowing users to successfully download the NGINX Agent from NGINX Instance Manager. + + +### Known Issues{#2-17-3-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.17.2 + +August 21, 2024 + +### Upgrade Paths {#2-17-2-upgrade-paths} + +NGINX Instance Manager supports upgrades from these previous versions: + +- 2.14.0 - 2.17.1 + +If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-17-2-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Stability and performance improvements** + + This release includes stability and performance improvements for a more reliable experience. + + +### Known Issues{#2-17-2-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. 
+ +--- + +## 2.17.1 + +July 24, 2024 + +### Upgrade Paths {#2-17-1-upgrade-paths} + +NGINX Instance Manager supports upgrades from these previous versions: + +- 2.14.0 - 2.17.0 + +If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-17-1-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Stability and performance improvements** + + This release includes stability and performance improvements for a more reliable experience. + + +### Known Issues{#2-17-1-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.17.0 + +July 10, 2024 + +### Upgrade Paths {#2-17-0-upgrade-paths} + +NGINX Instance Manager supports upgrades from these previous versions: + +- 2.14.0 - 2.16.0 + +If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-17-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Adds support for NGINX App Protect WAF v5** + + NGINX Instance Manager 2.17.0 adds support for [NGINX App Protect WAF v5.](https://docs.nginx.com/nginx-app-protect-waf/v5/admin-guide/overview/). + + NGINX App Protect WAF v5 (designed for both NGINX Open Source and NGINX Plus environments) consists of a dynamic NGINX module and containerized WAF services, providing robust security and scalability. + +- {{% icon-feature %}} **Hosted Docker images for Kubernetes Helm charts** + + Prior to this release, users had to download NGINX Instance Manager docker images and push them to their local container registry for use in the Kubernetes Helm charts. This was not very turnkey and required multiple steps before being able to use the Helm charts. 
Now all Instance Manager container images are available from F5's public docker repository, simplifying the installation in Kubernetes.
+
+  See the [Deploy Instance Manager on Kubernetes]({{< relref "/nim/deploy/kubernetes/deploy-using-helm.md" >}}) documentation for more information.
+
+- {{% icon-feature %}} **Ansible role to deploy NGINX Instance Manager**
+
+  This release comes with an Ansible role to help you install NGINX Instance Manager quickly, while also encouraging the best practices for your chosen environment.
+
+- {{% icon-feature %}} **NGINX Instance Manager IaC using Packer and Terraform**
+
+  This release improves the [Infrastructure as Code (IaC) project]({{< relref "/nim/deploy/infrastructure-as-code/overview.md#nginx-management-suite-infrastructure-as-code" >}}) to help you quickly get started with NGINX Instance Manager using Packer and Terraform.
+
+  The project uses Packer to create images and Terraform to deploy these images to your preferred cloud provider, including GCP, Azure, or vSphere.
+
+- {{% icon-feature %}} **Single docker image with all the NGINX Instance Manager services and dependencies**
+
+  This release includes access to a single Docker image for running NGINX Instance Manager as a container. This allows customers to deploy Instance Manager locally with a single "docker run" command. For more details, see [Deploy NGINX Instance Manager in a Single Docker Container]({{< relref "/nim/deploy/docker/deploy-nginx-instance-manager-docker.md" >}}).
+
+### Changes in Default Behavior{#2-17-0-changes-in-behavior}
+This release has the following changes in default behavior:
+
+- {{% icon-feature %}} **Web Analytics**
+
+  NGINX Instance Manager now collects and sends anonymized telemetry and interaction information for analysis by F5 NGINX. This information is used to improve our products and services. 
+ + Customers have the option to opt out of data collection by disabling the feature in the Instance Manager web interface, using the Account menu in the top-right corner of the screen. For more details, see [Configure Telemetry and Web Analytics]({{< relref "/nim/system-configuration/configure-telemetry.md" >}}). + +- {{% icon-feature %}} **Augment Template order now matches NGINX configuration structure** + + When you generate a configuration using augment templates, the order shown in the UI now matches the structure of an NGINX configuration. This makes filling out a template more intuitive. + +- {{% icon-feature %}} **End of support for CentOS 7 and Red Hat Enterprise Linux 7** + + CentOS 7 and Red Hat Enterprise Linux 7 reached [end of maintenance support](https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux/rhel-7-end-of-maintenance) on June 30, 2024. + + Since these operating systems won't get any more updates or security patches, NGINX Instance Manager no longer supports them. + + Please upgrade your environment to one of the [supported distributions]({{< relref "/nim/fundamentals/tech-specs.md#supported-distributions" >}}) to continue using NGINX Instance Manager. + +### Resolved Issues{#2-17-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. 
+ +- {{% icon-resolved %}} Users receive login error when NGINX Management Suite is deployed in Kubernetes [(44686)]({{< relref "/nim/releases/known-issues.md#44686" >}}) +- {{% icon-resolved %}} REST API does not work until you log into the web interface first [(44877)]({{< relref "/nim/releases/known-issues.md#44877" >}}) +- {{% icon-resolved %}} Editing template submissions uses the latest versions, may cause "malformed" errors [(44961)]({{< relref "/nim/releases/known-issues.md#44961" >}}) +- {{% icon-resolved %}} Editing template submissions now allows for using most recent template version [(44971)]({{< relref "/nim/releases/known-issues.md#44971" >}}) + +### Known Issues{#2-17-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.16.0 + +April 18, 2024 + +### Upgrade Paths {#2-16-0-upgrade-paths} + +NGINX Instance Manager supports upgrades from these previous versions: + +- 2.13.0 - 2.15.1 + +If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-16-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Introducing configuration templates for simplifying NGINX configurations and self-service workflows** + + This release of NGINX Instance Manager introduces [Config Templates]({{< relref "nim/nginx-configs/config-templates/concepts/config-templates.md" >}}). These templates use Go templating to make it easier to set up and standardize NGINX configurations. Now, you don't need to know all the details of NGINX syntax to create a working configuration. Just provide the required inputs for a template, and the system will do the rest. This makes setting up NGINX simpler and helps you follow best practices. 
+ + To provide more control over your configurations, [augment templates]({{< relref "nim/nginx-configs/config-templates/concepts/default-base-template.md#augmenting-global-default-base-template" >}}) let you modify only specific segments of your NGINX configuration. This, when combined with [RBAC for template submissions]({{< relref "/nim/nginx-configs/config-templates/how-to/rbac-config-templates-and-submissions.md" >}}), enables self-service workflows. Look for pre-built templates for common scenarios in our GitHub repositories soon. + +- {{% icon-feature %}} **Stability and performance improvements** + + This release enhances system stability and performance. + + +### Changes in Default Behavior{#2-16-0-changes-in-behavior} +This release has the following changes in default behavior: + +- {{% icon-feature %}} **Change in NGINX Agent upgrade behavior** + + Starting from version v2.31.0, the NGINX Agent will automatically restart itself during an upgrade. + + +### Known Issues{#2-16-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.15.1 + +February 14, 2024 + +### Upgrade Paths {#2-15-1-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.12.0 - 2.15.0 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-15-1-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Stability and performance improvements** + + This release includes stability and performance improvements. + + +### Resolved Issues{#2-15-1-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. 
+ +- {{% icon-resolved %}} Helm chart backup and restore is broken in NIM 2.15.0 [(44758)]({{< relref "/nim/releases/known-issues.md#44758" >}}) +- {{% icon-resolved %}} Unable to use NMS Predefined Log Profiles for NAP 4.7 [(44759)]({{< relref "/nim/releases/known-issues.md#44759" >}}) + +### Known Issues{#2-15-1-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.15.0 + +December 12, 2023 + +### Upgrade Paths {#2-15-0-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.12.0 - 2.14.1 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-15-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Support for CA Certificates added** + + Instance Manager now allows for managing CA Certificates to fully support NGINX directives such as _proxy_ssl_trusted_ and _proxy_ssl_verify_. The main difference after this change is that you no longer need a corresponding key to upload a certificate to Instance Manager. + + +### Resolved Issues{#2-15-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. 
+ +- {{% icon-resolved %}} Instances reporting incorrect memory utilization [(44351)]({{< relref "/nim/releases/known-issues.md#44351" >}}) +- {{% icon-resolved %}} Data on the dashboard is updating unexpectedly [(44504)]({{< relref "/nim/releases/known-issues.md#44504" >}}) +- {{% icon-resolved %}} Missing Data when ClickHouse services are not running [(44586)]({{< relref "/nim/releases/known-issues.md#44586" >}}) +- {{% icon-resolved %}} NGINX App Protect Attack Signature, Threat Campaign and Compiler fail to download [(44603)]({{< relref "/nim/releases/known-issues.md#44603" >}}) + +### Known Issues{#2-15-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.14.1 + +October 19, 2023 + +### Upgrade Paths {#2-14-1-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.11.0 - 2.14.0 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-14-1-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Stability and performance improvements** + + This release includes stability and performance improvements. + + +### Known Issues{#2-14-1-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.14.0 + +October 16, 2023 + +### Upgrade Paths {#2-14-0-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.11.0 - 2.13.1 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-14-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Instance Manager Dashboard** + + Monitor the health and performance of your NGINX instance fleet from a single page. 
Get insights and trends on CPU, memory, disk, and network traffic utilization. Quickly spot and mitigate common HTTP errors and TLS certificate issues. See the [Instance Manager Dashboard]({{< relref "nim/fundamentals/dashboard-overview.md" >}}) documentation to learn more.
+
+- {{% icon-feature %}} **Work with NGINX App Protect Bundles from Instance Manager**
+
+  Starting with Instance Manager 2.14, you can now use the "/security/policies/bundles" endpoint to create, read, update, and delete NGINX App Protect bundles, which allow faster deployment through pre-compilation of security policies, attack signatures, and threat campaigns. For additional information on how to use the API endpoint, refer to your product API documentation.
+  To learn more about this feature, see the [Manage WAF Security Policies]({{< relref "/nim/nginx-app-protect/manage-waf-security-policies.md" >}}) documentation.
+
+- {{% icon-feature %}} **ClickHouse LTS 23.8 support**
+
+  This release of Instance Manager has been tested and is compatible with ClickHouse LTS versions 22.3.15.33 to 23.8.
+
+
+### Changes in Default Behavior{#2-14-0-changes-in-behavior}
+This release has the following changes in default behavior:
+
+- {{% icon-feature %}} **Inactive NGINX instances are automatically removed over time**
+
+  If an NGINX instance has been inactive (NGINX Agent not reporting to NGINX Management Suite) for a fixed amount of time, it is now automatically removed from the instances list. Instances deployed in a virtual machine or hardware are removed after 72 hours of inactivity, and those deployed in a container are removed after 12 hours.
+
+
+### Known Issues{#2-14-0-known-issues}
+
+You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. 
+ +--- + +## 2.13.1 + +September 05, 2023 + +### Upgrade Paths {#2-13-1-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.10.0 - 2.13.0 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### Resolved Issues{#2-13-1-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Validation errors in Resource Groups for certificates uploaded before 2.13 upgrade [(44254)]({{< relref "/nim/releases/known-issues.md#44254" >}}) +- {{% icon-resolved %}} Access levels cannot be assigned to certain RBAC features [(44277)]({{< relref "/nim/releases/known-issues.md#44277" >}}) + +### Known Issues{#2-13-1-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.13.0 + +August 28, 2023 + +### Upgrade Paths {#2-13-0-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.10.0 - 2.12.0 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-13-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Easily manage access to specific objects with Resource Groups** + + With NGINX Instance Manager, you can now combine Instances, Instance Groups, and Certificates into a Resource Group. This grouping can be used when defining roles to grant access to those specific objects. When objects are added to or removed from the Resource Group, the changes are automatically reflected in any roles that use the Resource Group. For more details, refer to [Working with Resource Groups]({{< relref "/nim/admin-guide/rbac/manage-resource-groups.md" >}}). 
+ +- {{% icon-feature %}} **Get version controlled NGINX configurations with an external commit hash** + + The Instance Manager REST API supports setting and retrieving instances, instance groups, and staged NGINX configurations using a version control commit hash. + + To learn how to use a commit hash with NGINX configurations, refer to these topics: + + - [Add Hash Versioning to Staged Configs]({{< relref "/nim/nginx-configs/stage-configs.md#hash-versioning-staged-configs" >}}) + - [Publish Configs with Hash Versioning to Instances]({{< relref "/nim/nginx-configs/publish-configs.md#publish-configs-instances-hash-versioning" >}}) + - [Publish Configs with Hash Versioning to Instance Groups]({{< relref "/nim/nginx-configs/publish-configs.md#publish-configs-instance-groups-hash-versioning" >}}) + +- {{% icon-feature %}} **Configure analytics data retention with the nms.conf file** + + You can set the data retention policy for analytics data, which includes metrics, events, and security events, in the `nms.conf` file. By default, metrics and security events are stored for 32 days, while events are stored for 120 days. To keep data for a longer period, update the retention durations in the `nms.conf` file. + +- {{% icon-feature %}} **RBAC for security policies** + + You can now use [Role-Based Access Control (RBAC)]({{< relref "/nim/admin-guide/rbac/overview-rbac.md" >}}) to allow or restrict the level of access to security policies according to your security governance model. + +- {{% icon-feature %}} **RBAC for log profiles** + + You can now use [Role-Based Access Control (RBAC)]({{< relref "/nim/admin-guide/rbac/overview-rbac.md" >}}) to allow or restrict access to log profiles according to your security governance model. 
+ +- {{% icon-feature %}} **Use NGINX Plus Health Checks to easily track NGINX Plus Usage with NGINX Instance Manager** + + The NGINX Plus Health Check feature now allows you to monitor the count of both NGINX Plus and NGINX App Protect instances that you've deployed. You can view this information in the "NGINX Plus" area of the "Instance Manager" web interface, or through the `/inventory` API. For guidance on how to set this up, refer to the following documentation: [View Count of NGINX Plus Instances]({{< relref "/nim/admin-guide/license/report-usage-connected-deployment.md" >}}). + +- {{% icon-feature %}} **Improved log output for better JSON parsing** + + In the log output, extra whitespace has been removed, and brackets have been removed from the log `level` field. This results in clean, parsable log output, particularly when using JSON log encoding. + + +### Resolved Issues{#2-13-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} An "unregistered clickhouse-adapter" failure is logged every few seconds if logging is set to debug. [(43438)]({{< relref "/nim/releases/known-issues.md#43438" >}}) + +### Known Issues{#2-13-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.12.0 + +July 20, 2023 + +### Upgrade Paths {#2-12-0-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.9.0 - 2.11.0 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-12-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **New support for license tokens for automatic entitlement updates, renewals, and Flexible Consumption Reporting** + + NGINX Management Suite now supports license tokens formatted as a JSON Web Token (JWT). 
With JWT licensing, you can automatically update entitlements during subscription renewals or amendments, and you can automate reporting for the Flexible Consumption Program (FCP). For more information, see the [Add a License]({{< relref "/nim/admin-guide/license/add-license.md" >}}) topic. + + +### Resolved Issues{#2-12-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Filtering Analytics data with values that have double backslashes (`\\`) causes failures [(42105)]({{< relref "/nim/releases/known-issues.md#42105" >}}) +- {{% icon-resolved %}} Unable to publish configurations referencing the log bundle for Security Monitor [(42932)]({{< relref "/nim/releases/known-issues.md#42932" >}}) +- {{% icon-resolved %}} Disk Usage in Metrics Summary shows incorrect data when multiple partitions exist on a system [(42999)]({{< relref "/nim/releases/known-issues.md#42999" >}}) + +### Known Issues{#2-12-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.11.0 + +June 12, 2023 + +### Upgrade Paths {#2-11-0-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.8.0 - 2.10.1 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-11-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **The config editor now lets you see auxiliary files** + + Auxiliary files, such as certificate files and other non-config files on managed instances or instance groups, are now visible in the file tree of the config editor view. This improvement makes it easier to reference these files within a configuration. 
+ +- {{% icon-feature %}} **Introducing new predefined log profiles for NGINX App Protect WAF** + + Now, managing your NGINX App Protect WAF configuration is even easier with new predefined log profiles. In addition to the existing log_all, log_blocked, log_illegal, and log_secops log profiles, the following new predefined log profiles are now available: + + - log_f5_arcsight + - log_f5_splunk + - log_grpc_all + - log_grpc_blocked + - log_grpc_illegal + + These new log profiles make it even easier to integrate NGINX App Protect WAF with other logging systems, such as Splunk, ArcSight, and gRPC. + +- {{% icon-feature %}} **You can now install Advanced Metrics automatically when you install NGINX Agent** + + When installing the NGINX Agent with NGINX Management Suite, you can include the `-a` or `--advanced-metrics` flag. Including this option installs the Advanced Metrics module along with the NGINX Agent. With this module, you gain access to extra metrics and insights that enrich the monitoring and analysis capabilities of the NGINX Management Suite, empowering you to make more informed decisions. + +- {{% icon-feature %}} **NGINX Management Suite can send telemetry data to F5 NGINX** + + In order to enhance product development and support the success of our users with NGINX Management Suite, we offer the option to send limited telemetry data to F5 NGINX. This data provides valuable insights into software usage and adoption. By default, telemetry is enabled, but you have the flexibility to disable it through the web interface or API. For detailed information about the transmitted data, please refer to our documentation. + + +### Changes in Default Behavior{#2-11-0-changes-in-behavior} +This release has the following changes in default behavior: + +- {{% icon-feature %}} **The location of agent-dynamic.conf has changed** + + In this release, the `agent-dynamic.conf` file has been moved from `/etc/nginx-agent/` to `/var/lib/nginx-agent/`. 
To assign an instance group and tags to an instance, you will now need to edit the file located in `/var/lib/nginx-agent/`.
+
+- {{% icon-feature %}} **Action required: Update OIDC configurations for management plane after upgrading to Instance Manager 2.11.0**
+
+  In Instance Manager 2.11.0, we added support for telemetry to the OIDC configuration files. Existing OIDC configurations will continue to work, but certain telemetry events, such as login, may not be captured.
+
+- {{% icon-feature %}} **Configuration file permissions have been lowered to strengthen security**
+
+  To strengthen the security of configuration details, certain file permissions have been modified. Specifically, the following configuration files now have lowered permissions, granting Owner Read/Write access and Group Read access (also referred to as `0640` or `rw-r-----`):
+
+  - /etc/nms/nginx.conf
+  - /etc/nginx/conf.d/nms-http.conf
+  - /etc/nms/nginx/oidc/openid_configuration.conf
+  - /etc/nms/nginx/oidc/openid_connect.conf
+
+  Additionally, the following file permissions have been lowered to Owner Read/Write and Group Read/Write access (also known as `0660` or `rw-rw----`):
+
+  - /logrotate.d/nms.conf
+  - /var/log/nms/nms.log
+
+  These changes aim to improve the overall security of the system by restricting access to sensitive configuration files while maintaining necessary privileges for authorized users.
+
+
+### Resolved Issues{#2-11-0-resolved-issues}
+This release fixes the following issues. Select an issue's ID link to view its details. 
+ +- {{% icon-resolved %}} Count of NGINX Plus graph has a delay in being populated [(37705)]({{< relref "/nim/releases/known-issues.md#37705" >}}) +- {{% icon-resolved %}} Duplicate Certificate and Key published for managed certificates [(42182)]({{< relref "/nim/releases/known-issues.md#42182" >}}) +- {{% icon-resolved %}} The Metrics module is interrupted during installation on Red Hat 9 [(42219)]({{< relref "/nim/releases/known-issues.md#42219" >}}) +- {{% icon-resolved %}} Certificate file is not updated automatically under certain conditions [(42425)]({{< relref "/nim/releases/known-issues.md#42425" >}}) +- {{% icon-resolved %}} Certificate updates allow for multiples certs to share the same serial number [(42429)]({{< relref "/nim/releases/known-issues.md#42429" >}}) + +### Known Issues{#2-11-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.10.1 + +May 22, 2023 + +### Upgrade Paths {#2-10-1-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.7.0 - 2.10.0 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### Resolved Issues{#2-10-1-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Valid licenses incorrectly identified as invalid [(42598)]({{< relref "/nim/releases/known-issues.md#42598" >}}) + +### Known Issues{#2-10-1-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. 
+ +--- + +## 2.10.0 + +April 26, 2023 + +### Upgrade Paths {#2-10-0-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.7.0 - 2.9.1 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-10-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **New "Category" Filter in the Events web interface** + + You can now filter entries in the Events web interface using a new "Category" filter. Categories for event entries include "Certs", "Instance Groups", and "Templates". + +- {{% icon-feature %}} **New NGINX Agent install flag for NGINX App Protect WAF** + + The NGINX Agent installation script now has a flag to enable the default configuration required for NGINX App Protect WAF. It is used to retrieve the deployment status and `precompiled_publication` mode, with an option for the NGINX App Protect WAF instance to use the mode for policies. + +- {{% icon-feature %}} **NGINX Management Suite version now visible in the web interface and API** + + You can now look up the NGINX Management Suite and NGINX Instance Manager versions in the web interface and API. Other module versions are also visible, though older versions of API Connectivity Manager and Security Monitoring may appear as undefined. + +- {{% icon-feature %}} **NGINX Management Suite can now use NGINX Ingress Controller to manage routing** + + The NGINX Management Suite Helm Chart can now generate an NGINX Ingress Controller VirtualServer definition, which can be used to expose NGINX Management Suite when running in your Kubernetes cluster. + More about the VirtualServer custom resource can be found in the [VirtualServer and VirtualServerRoute](https://docs.nginx.com/nginx-ingress-controller/configuration/virtualserver-and-virtualserverroute-resources/) documentation. 
+ +- {{% icon-feature %}} **Configuration Publication Status now visible in App Security pages.** + + The most recent publication date and status for an instance's configuration is now visible on App Security Pages. This reflects configuration for NGINX, NGINX App Protect policies, Attack Signatures and Threat Campaigns. + +- {{% icon-feature %}} **Instance Manager can now automatically retrieve WAF compilers associated with NGINX App Protect instances** + + Using a user-provided NGINX repository certificate & key after the first set-up of the WAF compiler, Instance Manager can automatically retrieve WAF compilers associated with NGINX App Protect instances. These can be used to publish App Protect WAF configurations in `precompiled_publication` mode. + +- {{% icon-feature %}} **Add option to toggle ICMP scanning in the web interface** + + You can now explicitly enable or disable ICMP scanning at the top of the "Scan" interface. + +- {{% icon-feature %}} **New NGINX Agent install flag for Security Monitoring** + + The NGINX Agent installation script now has a flag to enable the default configuration required for the Security Monitoring module. + + +### Changes in Default Behavior{#2-10-0-changes-in-behavior} +This release has the following changes in default behavior: + +- {{% icon-feature %}} **Improvements to Role Based Access Control for SSL Certificate and Key management** + + Role Based Access Control for SSL Certificate and Key management can now use three different objects for precise controls: certificates, systems, and instance groups. Using certificates as an object controls the viewing and assigning of specific certificate and key pairs. Using systems or instance groups allows a user to see all certificates but restricts access for publishing. 
+ +- {{% icon-feature %}} **By default, NGINX Management Suite is not exposed to the internet when installed with a Helm Chart** + + When NGINX Management Suite is installed using a Helm Chart, it now defaults to a ClusterIP without an external IP address. + + +### Resolved Issues{#2-10-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Installing NGINX Agent on FreeBSD fails with "error 2051: not implemented" [(41157)]({{< relref "/nim/releases/known-issues.md#41157" >}}) +- {{% icon-resolved %}} SELinux errors encountered when starting NGINX Management Suite on RHEL9 with the SELinux policy installed [(41327)]({{< relref "/nim/releases/known-issues.md#41327" >}}) + +### Known Issues{#2-10-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.9.1 + +April 06, 2023 + +### Upgrade Paths {#2-9-1-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.6.0 - 2.9.0 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### Resolved Issues{#2-9-1-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} NGINX configurations with special characters may not be editable from the web interface after upgrading Instance Manager [(41557)]({{< relref "/nim/releases/known-issues.md#41557" >}}) + +### Known Issues{#2-9-1-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. 
+ +--- + +## 2.9.0 + +March 21, 2023 + +### Upgrade Paths {#2-9-0-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.6.0 - 2.8.0 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-9-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **New webpages for viewing Attack Signature and Threat Campaigns** + + The Instance Manager web interface now allows you to view Attack Signatures and Threat Campaign packages published to instances and instance groups. You can also publish these packages using the precompiled publication mode. + +- {{% icon-feature %}} **NGINX Agent supports Rocky Linux 8 and 9** + + The NGINX Agent now supports Rocky Linux 8 (x86_64, aarch64) and 9 (x86_64, aarch64). The NGINX Agent supports the same distributions as NGINX Plus. For a list of the supported distributions, refer to the [NGINX Plus Technical Specs](https://docs.nginx.com/nginx/technical-specs/#supported-distributions) guide. + +- {{% icon-feature %}} **New Events for CUD actions** + + Events will be triggered for `CREATE`, `UPDATE`, and `DELETE` actions on Templates, Instances, Certificates, Instance Groups, and Licenses. + +- {{% icon-feature %}} **The _Certificate and Keys_ webpage has a new look!** + + Our new and improved _Certificates and Keys_ webpage makes it easier than ever to efficiently manage your TLS certificates. + +- {{% icon-feature %}} **Add commit hash details to NGINX configurations for version control** + + Use the Instance Manager REST API to add a commit hash to NGINX configurations if you use version control, such as Git. 
+
+  For more information, see the following topics:
+
+  - [Add Hash Versioning to Staged Configs]({{< relref "/nim/nginx-configs/stage-configs.md#hash-versioning-staged-configs" >}})
+  - [Publish Configs with Hash Versioning to Instances]({{< relref "/nim/nginx-configs/publish-configs.md#publish-configs-instances-hash-versioning" >}})
+  - [Publish Configs with Hash Versioning to Instance Groups]({{< relref "/nim/nginx-configs/publish-configs.md#publish-configs-instance-groups-hash-versioning" >}})
+
+
+### Security Updates{#2-9-0-security-updates}
+
+{{< important >}}
+For the protection of our customers, NGINX doesn’t disclose security issues until an investigation has occurred and a fix is available.
+{{< /important >}}
+
+This release includes the following security updates:
+
+- {{% icon-resolved %}} **Instance Manager vulnerability CVE-2023-1550**
+
+  NGINX Agent inserts sensitive information into a log file ([CVE-2023-1550](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-1550)). An authenticated attacker with local access to read NGINX Agent log files may gain access to private keys. This issue is exposed only when the non-default trace-level logging is enabled.
+
+  NGINX Agent is included with NGINX Instance Manager, and used in conjunction with API Connectivity Manager and the Security Monitoring module.
+
+  This issue has been classified as [CWE-532: Insertion of Sensitive Information into Log File](https://cwe.mitre.org/data/definitions/532.html).
+
+  #### Mitigation
+
+  - Avoid configuring trace-level logging in the NGINX Agent configuration file. For more information, refer to the [Configuring the NGINX Agent]({{< relref "/nms/nginx-agent/install-nginx-agent.md#configuring-the-nginx-agent" >}}) section of NGINX Management Suite documentation. If trace-level logging is required, ensure only trusted users have access to the log files. 
+ + #### Fixed in + + - NGINX Agent 2.23.3 + - Instance Manager 2.9.0 + + For more information, refer to the MyF5 article [K000133135](https://my.f5.com/manage/s/article/K000133135). + + +### Changes in Default Behavior{#2-9-0-changes-in-behavior} +This release has the following changes in default behavior: + +- {{% icon-feature %}} **SSL Certificates can be associated with Instance Groups** + + When assigning SSL certificates for the NGINX data plane, you have the option of associating them with a single instance or with an instance group. When associated with an instance group, the certificates will be shared across all instances in the group. + +- {{% icon-feature %}} **Action required: OIDC configurations for the management plane must be updated after upgrading to Instance Manager 2.9.0** + + OIDC configuration files were modified to improve support for automation and integration in CI/CD pipelines. To continue using OIDC after upgrading to Instance Manager 2.9.0, you'll need to update these configuration files. + + To take advantage of the expanded functionality for OIDC authentication with NGINX Management Suite, we recommend following these two options: + + #### Option 1 + + 1. During the upgrade, type `Y` when prompted to respond `Y or I: install the package mainatiner's version` for each of the following files: + + - `/etc/nms/nginx/oidc/openid_configuration.conf` + - `/etc/nms/nginx/oidc/openid_connect.conf` + - `/etc/nms/nginx/oidc/openid_connect.js` + + 1. After the upgrade finishes, make the following changes to the `/etc/nms/nginx/oidc/openid_configuration.conf` file using the `/etc/nms/oidc/openid_connect.conf.dpkg-old` that was created as a backup: + + - Uncomment the appropriate "Enable when using OIDC with" for your IDP (for example, keycloak, azure). + - Update `$oidc_authz_endpoint` value with the corresponding values from `openid_connect.conf.dpkg-old`. 
+ - Update `$oidc_token_endpoint` value with the corresponding values from `openid_connect.conf.dpkg-old`. + - Update `$oidc_jwt_keyfile` value with the corresponding values from `openid_connect.conf.dpkg-old`. + - Update `$oidc_client` and `oidc_client_secret` with corresponding values from `openid_connect.conf.dpkg-old`. + - Review and restore any other customizations from `openid_connect.conf.dpkg-old` beyond those mentioned above. + + 1. Save the file. + 1. Restart NGINX Management Suite: + + ```bash + sudo systemctl restart nms + ``` + + 1. Restart the NGINX web server: + + ```bash + sudo systemctl restart nginx + ``` + +
    + + #### Option 2 + + 1. Before upgrading Instance Manager, edit the following files with your desired OIDC configuration settings: + + - `/etc/nginx/conf.d/nms-http.conf` + - `/etc/nms/nginx/oidc/openid_configuration.conf` + - `/etc/nms/nginx/oidc/openid_connect.conf` + - `/etc/nms/nginx/oidc/openid_connect.js` + + 1. During the upgrade, type `N` when prompted to respond `N or O : keep your currently-installed version`. + 1. After the upgrade finishes replace `etc/nms/nginx/oidc/openid_connect.js` with `openid_connect.js.dpkg-dist`. + 1. Restart NGINX Management Suite: + + ```bash + sudo systemctl restart nms + ``` + + 1. Restart the NGINX web server: + + ```bash + sudo systemctl restart nginx + ``` + + +### Resolved Issues{#2-9-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} After upgrading to NGINX Instance Manager 2.1.0, the web interface reports timeouts when NGINX Agent configs are published [(32349)]({{< relref "/nim/releases/known-issues.md#32349" >}}) +- {{% icon-resolved %}} Scan does not update an unmanaged instance to managed [(37544)]({{< relref "/nim/releases/known-issues.md#37544" >}}) +- {{% icon-resolved %}} "Public Key Not Available" error when upgrading Instance Manager on a Debian-based system [(39431)]({{< relref "/nim/releases/known-issues.md#39431" >}}) +- {{% icon-resolved %}} The Type text on the Instances overview page may be partially covered by the Hostname text [(39760)]({{< relref "/nim/releases/known-issues.md#39760" >}}) +- {{% icon-resolved %}} App Protect: "Assign Policy and Signature Versions" webpage may not initially display newly added policies [(40085)]({{< relref "/nim/releases/known-issues.md#40085" >}}) +- {{% icon-resolved %}} Upgrading NGINX Management Suite may remove the OIDC configuration for the platform [(41328)]({{< relref "/nim/releases/known-issues.md#41328" >}}) + +### Known Issues{#2-9-0-known-issues} + +You can find 
information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.8.0 + +January 30, 2023 + +### Upgrade Paths {#2-8-0-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.5.0 - 2.7.0 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-8-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Enhanced details page for SSL Certificates** + + The Instance Manager web interface now features an improved details page for SSL Certificates. This page provides important information about the certificate and any associated instances. + +- {{% icon-feature %}} **Automatic retrieval of Attack Signatures and Threat Campaign updates to Instance Manager** + + Instance Manager now allows you to [set up automatic downloads of the most recent Attack Signature and Threat Campaign packages]({{< relref "/nim/nginx-app-protect/setup-waf-config-management.md##automatically-download-latest-packages" >}}). By publishing these updates to your App Protect instances from Instance Manager, you can ensure your applications are shielded from all recognized attack types. + +- {{% icon-feature %}} **Improved WAF Compiler error messages** + + The messaging around [security policy compilation errors]({{< relref "/nim/nginx-app-protect/manage-waf-security-policies.md#check-for-compilation-errors" >}}) has been improved by providing more detailed information and alerting users if the required compiler version is missing. 
+ + +### Changes in Default Behavior{#2-8-0-changes-in-behavior} +This release has the following changes in default behavior: + +- {{% icon-feature %}} **Switching between storing secrets on disk and using Vault migrates secrets** + + When transitioning between storing secrets on disk or using HashiCorp Vault, any existing secrets can be easily migrated to the new storage method. For instructions, refer to the guide [Configure Vault for Storing Secrets]({{< relref "/nim/system-configuration/configure-vault.md" >}}). + +- {{% icon-feature %}} **Create roles using either an object name or UID** + + You can now use either an object name or a unique identifier (UID) when assigning object-level permissions while creating or editing a role via the Instance Manager REST API. + +- {{% icon-feature %}} **Upgrading from 2.7 or earlier, you must re-enable `precompiled_publication` to continue publishing security policies with Instance Manager** + + To continue publishing security policies with Instance Manager if you are upgrading from Instance Manager 2.7 and earlier, you must set the `precompiled_publication` parameter to `true` in the `nginx-agent.conf` file. + + In Instance Manager 2.7 and earlier, the `pre-compiled_publication` setting was set to `true` by default. However, starting with Instance Manager 2.8, this setting is set to `false` by default. This means you will need to change this setting to `true` again when upgrading from earlier versions. + + To publish App Protect policies from Instance Manager, add the following to your `nginx-agent.conf` file: + + ```yaml + nginx_app_protect: + precompiled_publication: true + ``` + + +### Resolved Issues{#2-8-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. 
+ +- {{% icon-resolved %}} Web interface reports no license found when a license is present [(30647)]({{< relref "/nim/releases/known-issues.md#30647" >}}) +- {{% icon-resolved %}} Associating instances with expired certificates causes internal error [(34182)]({{< relref "/nim/releases/known-issues.md#34182" >}}) +- {{% icon-resolved %}} Publishing to an Instance/instance-group will fail when the configuration references a JSON policy or a JSON log profile [(38357)]({{< relref "/nim/releases/known-issues.md#38357" >}}) +- {{% icon-resolved %}} Missing dimension data for Advanced Metrics with modules [(38634)]({{< relref "/nim/releases/known-issues.md#38634" >}}) +- {{% icon-resolved %}} Large payloads can result in disk I/O error for database operations [(38827)]({{< relref "/nim/releases/known-issues.md#38827" >}}) +- {{% icon-resolved %}} The Policy API endpoint only allows NGINX App Protect policy upsert with content length upto 3.14MB. [(38839)]({{< relref "/nim/releases/known-issues.md#38839" >}}) +- {{% icon-resolved %}} Deploy NGINX App Protect policy is listed as "Not Deployed" on the Policy Version detail page [(38876)]({{< relref "/nim/releases/known-issues.md#38876" >}}) +- {{% icon-resolved %}} NGINX Management Suite services may lose connection to ClickHouse in a Kubernetes deployment [(39285)]({{< relref "/nim/releases/known-issues.md#39285" >}}) +- {{% icon-resolved %}} NGINX App Protect status may not be displayed after publishing a configuration with a security policy and certificate reference [(39382)]({{< relref "/nim/releases/known-issues.md#39382" >}}) +- {{% icon-resolved %}} Security Policy Snippet selector adds incorrect path reference for policy directive [(39492)]({{< relref "/nim/releases/known-issues.md#39492" >}}) +- {{% icon-resolved %}} The API Connectivity Manager module won't load if the Security Monitoring module is enabled [(39943)]({{< relref "/nim/releases/known-issues.md#39943" >}}) +- {{% icon-resolved %}} The API Connectivity 
Manager module won't load if the Security Monitoring module is enabled [(44433)]({{< relref "/nim/releases/known-issues.md#44433" >}}) + +### Known Issues{#2-8-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.7.0 + +December 20, 2022 + +### Upgrade Paths {#2-7-0-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.4.0 - 2.6.0 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### Changes in Default Behavior{#2-7-0-changes-in-behavior} +This release has the following changes in default behavior: + +- {{% icon-feature %}} **NGINX App Protect upgrades are supported** + + You can upgrade NGINX App Protect WAF on managed instances where Instance Manager publishes NGINX App Protect policies and configurations. For example, upgrade from App Protect release 3.12.2 to release 4.0. + +- {{% icon-feature %}} **NGINX Management Suite Config file is now in YAML format** + + With the release of NGINX Instance Manager 2.7, the NGINX Management Suite configuration file is now in YAML format. Through the upgrade process, your existing configuration will automatically be updated. Any settings you have customized will be maintained in the new format. If you have existing automation tooling for the deployment of the NGINX Management Suite that makes changes to the configuration file, you will need to update it to account for the change. + +- {{% icon-feature %}} **Existing NGINX Agent configuration kept during upgrade to the latest version** + + When upgrading NGINX Agent, the existing NGINX Agent configuration is maintained during the upgrade. If the Agent configuration is not present in `/etc/nginx-agent/nginx-agent.conf`, a default configuration is provided after NGINX Agent installation. 
+ + +### Resolved Issues{#2-7-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Instance Manager reports old NGINX version after upgrade [(31225)]({{< relref "/nim/releases/known-issues.md#31225" >}}) +- {{% icon-resolved %}} Instance Manager returns a "Download failed" error when editing an NGINX config for instances compiled and installed from source [(35851)]({{< relref "/nim/releases/known-issues.md#35851" >}}) +- {{% icon-resolved %}} Null data count is not correctly represented in the NGINX Plus usage graph. [(38206)]({{< relref "/nim/releases/known-issues.md#38206" >}}) +- {{% icon-resolved %}} When upgrading Instance Manager from v2.4 to later versions of Instance Manager, certificate associations are no longer visible. [(38641)]({{< relref "/nim/releases/known-issues.md#38641" >}}) +- {{% icon-resolved %}} NGINX App Protect policy deployment status not reflecting removal of associated instance. [(38700)]({{< relref "/nim/releases/known-issues.md#38700" >}}) +- {{% icon-resolved %}} When upgrading a multi-node NMS deployment with helm charts the ingestion pod may report a "Mismatched migration version" error [(38880)]({{< relref "/nim/releases/known-issues.md#38880" >}}) +- {{% icon-resolved %}} After a version upgrade of NGINX Instance Manager, NMS Data Plane Manager crashes if you publish NGINX configuration with App Protect enablement directive (app_protect_enable) set to ON [(38904)]({{< relref "/nim/releases/known-issues.md#38904" >}}) + +### Known Issues{#2-7-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. 
+ +--- + +## 2.6.0 + +November 17, 2022 + +### Upgrade Paths {#2-6-0-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.3.0 - 2.5.1 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-6-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Manage and deploy configurations to NGINX App Protect WAF Instances** + + This release introduces the following features to [manage and deploy configurations to NGINX App Protect instances]({{< relref "/nim/nginx-app-protect/overview-nap-waf-config-management.md" >}}): + + - Create, upsert, and delete NGINX App Protect WAF security policies + - Manage NGINX App Protect WAF security configurations by using the NGINX Management Suite user interface or REST API + - Update Signatures and Threat Campaign packages + - Compile security configurations into a binary bundle that can be consumed by NGINX App Protect WAF instances + +- {{% icon-feature %}} **Adds support for RHEL 9** + + Instance Manager 2.6 supports RHEL 9. See the [Technical Specifications Guide]({{< relref "/nim/fundamentals/tech-specs#distributions" >}}) for details. + +- {{% icon-feature %}} **Support for using HashiCorp Vault for storing secrets** + + NGINX Management Suite now supports the use of Hashicorp Vault to store secrets such as SSL Certificates and Keys. Use of a new or existing Vault deployment is supported. + +- {{% icon-feature %}} **Graph and additional data are included in NGINX Plus usage tracking interface** + + On the NGINX Plus usage tracking page, the number of NGINX Plus instances used over time is available in a graph. You can also view the minimum, maximum, and average count of concurrent unique instances in a given time period. 
+ +- {{% icon-feature %}} **Adds support for Oracle 8** + + Oracle 8 is now [a supported distribution]({{< relref "/nim/fundamentals/tech-specs#distributions" >}}) starting with Instance Manager 2.6. You can use the RedHat/CentOS distro to install the Oracle 8 package. + + +### Changes in Default Behavior{#2-6-0-changes-in-behavior} +This release has the following changes in default behavior: + +- {{% icon-feature %}} **GET Roles API responses now include user and group associations** + + `GET /roles` and `GET/roles/{roleName}` API responses include any user(s) or group(s) associated with a role now. + + +### Resolved Issues{#2-6-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Password error "option unknown" occurs when installing NGINX Instance Manager on Ubuntu with OpenSSL v1.1.0 [(33055)]({{< relref "/nim/releases/known-issues.md#33055" >}}) +- {{% icon-resolved %}} Instance Manager reports the NGINX App Protect WAF build number as the version [(37510)]({{< relref "/nim/releases/known-issues.md#37510" >}}) + +### Known Issues{#2-6-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.5.1 + +October 11, 2022 + +### Upgrade Paths {#2-5-1-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.2.0 - 2.5.0 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### Resolved Issues{#2-5-1-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. 
+ +- {{% icon-resolved %}} Extended NGINX metrics aren't reported for NGINX Plus R26 and earlier [(37738)]({{< relref "/nim/releases/known-issues.md#37738" >}}) + +### Known Issues{#2-5-1-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.5.0 + +October 04, 2022 + +### Upgrade Paths {#2-5-0-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.2.0 - 2.4.0 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-5-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Track NGINX Plus usage over time** + + When viewing your NGINX Plus instances in the Instnace Manager web interface, you can set a date and time filter to review the [NGINX Plus instance count]({{< relref "/nim/admin-guide/license/report-usage-connected-deployment.md" >}}) for a specific period. Also, you can use the Instance Manager REST API to view the lowest, highest, and average number of NGINX Plus instances over time. + +- {{% icon-feature %}} **New helm charts for each release of Instance Manager** + + Each release of Instance Manager now includes a helm chart, which you can use to easily [install Instance Manager on Kubernetes]({{< relref "/nim/deploy/kubernetes/deploy-using-helm.md" >}}). You can download the helm charts from [MyF5](https://my.f5.com/manage/s/downloads). + + +### Resolved Issues{#2-5-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. 
+ +- {{% icon-resolved %}} OIDC is not supported for helm chart deployments [(33248)]({{< relref "/nim/releases/known-issues.md#33248" >}}) +- {{% icon-resolved %}} Managed certificates may be overwritten if they have the same name on different datapath certificates [(36240)]({{< relref "/nim/releases/known-issues.md#36240" >}}) +- {{% icon-resolved %}} Scan overview page doesn't scroll to show the full list of instances [(36514)]({{< relref "/nim/releases/known-issues.md#36514" >}}) + +### Known Issues{#2-5-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.4.0 + +August 16, 2022 + +### Upgrade Paths {#2-4-0-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.1.0 - 2.3.1 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-4-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Get notified about critical events** + + Instance Manager 2.4 adds a notifications panel to the web interface. After logging in to NGINX Management Suite, select the notification bell at the top of the page to view critical system events (`WARNING` or `ERROR` level events). Future releases will support additional notification options. + +- {{% icon-feature %}} **See which of your NGINX Plus instances have NGINX App Protect installed** + + Now, when you [view your NGINX Plus inventory]({{< relref "/nim/admin-guide/license/report-usage-connected-deployment.md" >}}), you can see which instances have [NGINX App Protect](https://www.nginx.com/products/nginx-app-protect/) installed. 
NGINX App Protect is a modern app‑security solution that works seamlessly in DevOps environments as a robust WAF or app‑level DoS defense, helping you deliver secure apps from code to customer + + +### Changes in Default Behavior{#2-4-0-changes-in-behavior} +This release has the following changes in default behavior: + +- {{% icon-feature %}} **You no longer need to associate a certificate with an instance using the web interface** + + NGINX Management Suite will automatically deploy a certificate to an NGINX instance if the instance's config references the certificate on the NMS platform. + +- {{% icon-feature %}} **Adds nms-integrations service** + + This release adds a new service called `nms-integerations`. This service is for future integrations; no user management or configuration is needed at this time. + + +### Resolved Issues{#2-4-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Unable to publish config changes to a custom nginx.conf location [(35276)]({{< relref "/nim/releases/known-issues.md#35276" >}}) + +### Known Issues{#2-4-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.3.1 + +July 21, 2022 + +### Upgrade Paths {#2-3-1-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.0.0 - 2.3.0 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### Security Updates{#2-3-1-security-updates} + +{{< important >}} +For the protection of our customers, NGINX doesn’t disclose security issues until an investigation has occurred and a fix is available. 
+{{< /important >}} + +This release includes the following security updates: + +- {{% icon-resolved %}} **Instance Manager vulnerability CVE-2022-35241** + + In versions of 2.x before 2.3.1 and all versions of 1.x, when Instance Manager is in use, undisclosed requests can cause an increase in disk resource utilization. + + This issue has been classified as [CWE-400: Uncontrolled Resource Consumption](https://cwe.mitre.org/data/definitions/400.html). + + For more information, refer to the AskF5 article [K37080719](https://support.f5.com/csp/article/K37080719). + + +### Known Issues{#2-3-1-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.3.0 + +June 30, 2022 + +### Upgrade Paths {#2-3-0-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.0.0 - 2.2.0 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-3-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Instance Manager provides information about your NGINX App Protect WAF installations** + + You can configure NGINX Agent to report the following NGINX App Protect WAF installation information to NGINX Management Suite: + + - The current version of NGINX App Protect WAF + - The current status of NGINX App Protect WAF (active or inactive) + - The Attack Signatures package version + - The Threat Campaigns package version + +- {{% icon-feature %}} **View a summary of your instances' most important metrics for the last 24 hours** + + This release adds a **Metrics Summary** page, from which you can view key system, network, HTTP request, and connection metrics at a glance for the last 24 hours. After logging in to Instance Manager, select an instance on the **Instances Overview** page, then select the **Metrics Summary** tab. 
+ +- {{% icon-feature %}} **Track the details for your NGINX Plus instances** + + Easily track your NGINX Plus instances from the new NGINX Plus inventory list page. [View the current count for all your NGINX Plus instances]({{< relref "/nim/admin-guide/license/report-usage-connected-deployment.md" >}}), as well as each instance's hostname, UID, version, and the last time each instance was reported to Instance Manager. Select the `Export` button to export the list of NGINX Plus instances to a `.csv` file. + +- {{% icon-feature %}} **Explore events in NGINX Instance Manager with the Events Catalogs API** + + This release introduces a Catalogs API endpoint specifically for viewing NGINX Instance Manager events and corresponding information. You can access the endpoint at `/analytics/catalogs/events`. + +- {{% icon-feature %}} **Support for provisioning users and user groups with SCIM** + + Now, you can [use SCIM to provision, update, or deprovision users and user groups]({{< relref "/nim/admin-guide/authentication/oidc/scim-provisioning.md" >}}) for your Identity Provider to NGINX Instance Manager. SCIM, short for "[System for Cross-domain Identity Management](http://www.simplecloud.info)," is an open API for managing identities. + +- {{% icon-feature %}} **Adds support for Ubuntu 22.04** + + The NGINX Management Suite, which includes NGINX Instance Manager, now supports Ubuntu 22.04 (Jammy). + + Refer to the [Technical Specifications Guide]({{< relref "/nim/fundamentals/tech-specs" >}}) for details. + + +### Changes in Default Behavior{#2-3-0-changes-in-behavior} +This release has the following changes in default behavior: + +- {{% icon-feature %}} **New login screen** + + Sometimes it's the small things that count. Now, when logging in to NGINX Instance Manager, you're treated to an attractive-looking login screen instead of a bland system prompt. 🤩 + + +### Resolved Issues{#2-3-0-resolved-issues} +This release fixes the following issues. 
Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Post-install steps to load SELinux policy are in the wrong order [(34276)]({{< relref "/nim/releases/known-issues.md#34276" >}}) + +### Known Issues{#2-3-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.2.0 + +May 25, 2022 + +### Upgrade Paths {#2-2-0-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.0.0 - 2.1.0 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-2-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **New events for NGINX processes and configuration rollbacks** + + Now, you can use the [NGINX Instance Manager Events API]({{< relref "/nim/monitoring/view-events-metrics.md" >}}) or [web interface]({{< relref "/nim/monitoring/view-events-metrics.md" >}}) to view events when NGINX instances start and reload or when a configuration is rolled back. + +- {{% icon-feature %}} **Filter events and metrics with custom date and time ranges** + + Now you can filter [events]({{< relref "/nim/monitoring/view-events-metrics" >}}) and [metrics]({{< relref "/nim/monitoring/view-events-metrics" >}}) using a custom date and time range. Select **Custom time range** in the filter list, then specify the date and time range you want to use. + +- {{% icon-feature %}} **Role-based access control added to Events and Metrics pages** + + A warning message is shown when users try to view the Events and Metrics pages in the web interface if they don't have permission to access the Analytics feature. For instructions on assigning access to features using role-based access control (RBAC), see [Set Up RBAC]({{< relref "/nim/admin-guide/rbac/overview-rbac.md" >}}). 
+ +- {{% icon-feature %}} **Modules field added to Metrics and Dimensions catalogs** + + A `modules` field was added to the [Metics]({{< relref "nms/reference/catalogs/metrics.md" >}}) and [Dimensions]({{< relref "nms/reference/catalogs/dimensions.md" >}}) catalogs. This field indicates which module or modules the metric or dimension belongs to. + +- {{% icon-feature %}} **Adds reporting for NGINX worker metrics (API only)** + + The NGINX Agent now gathers metrics for NGINX workers. You can access these metrics using the NGINX Instance Manager Metrics API. + + The following worker metrics are reported: + + - The count of NGINX workers + - CPU, IO, and memory usage + + +### Resolved Issues{#2-2-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Running Agent install script with sh returns “not found” error [(33385)]({{< relref "/nim/releases/known-issues.md#33385" >}}) + +### Known Issues{#2-2-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.1.0 + +April 05, 2022 + +### Upgrade Paths {#2-1-0-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.0.0 - 2.0.1 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### What's New{#2-1-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Adds Docker support for NGINX Agent** + + Now you can collect metrics about the Docker containers that the NGINX Agent is running in. The NGINX Agent uses the available cgroup files to calculate metrics like CPU and memory usage. + + If you have multiple Docker containers on your data plane host, each container registers with Instance Manager as unique. 
+ + Refer to the [NGINX Agent Docker Support](https://docs.nginx.com/nginx-agent/installation-upgrade/container-environments/docker-support/) guide for details. + + {{< note >}}Containerizing the NGINX Agent is supported only with Docker at the moment. Look for additional container support in future releases of Instance Manager.{{< /note >}} + +- {{% icon-feature %}} **Redesigned metrics views in the web interface** + + The metrics pages in the web interface have been revised and improved. + + See the [View Metrics]({{< relref "/nim/monitoring/view-events-metrics" >}}) topic to get started. + +- {{% icon-feature %}} **New RBAC lets you limit access to NGINX Instance Manager features** + + RBAC has been updated and improved. Add users to roles -- or add users to user groups if you're using an external identity provider -- to limit access to Instance Manager features. + + For more information, see the tutorial [Set Up RBAC]({{< relref "/nim/admin-guide/rbac/overview-rbac.md" >}}). + +- {{% icon-feature %}} **Improved certificate handling** + + Stability and performance improvements for managing certificates using the web interface. + +- {{% icon-feature %}} **View events for your NGINX instances** + + Now you can use the Instance Manager API or web interface to view events for your NGINX instances. + + See the [View Events]({{< relref "/nim/monitoring/view-events-metrics" >}}) and [View Events (API)]({{< relref "/nim/monitoring/view-events-metrics" >}}) topics for instructions. + +- {{% icon-feature %}} **Deploy NGINX Instance Manager on Kubernetes using a helm chart** + + We recommend using the Instance Manager helm chart to install Instance Manager on Kubernetes. + + Among the benefits of deploying from a helm chart, the chart includes the required services, which you can scale independently as needed; upgrades can be done with a single helm command; and there's no requirement for root privileges. 
+ + For instructions, see [Install from a Helm Chart]({{< relref "/nim/deploy/kubernetes/deploy-using-helm.md" >}}). + + +### Changes in Default Behavior{#2-1-0-changes-in-behavior} +This release has the following changes in default behavior: + +- {{% icon-feature %}} **Tags are no longer enforced for RBAC or set when creating or updating a role** + + If you're using tags for RBAC on an earlier version of Instance Manager, you'll need to re-create your roles after upgrading. Tags assigned to instances for the purpose of RBAC won't be honored after you upgrade. + +- {{% icon-feature %}} **The DeploymentDetails API now requires values for `failure` and `success`** + + The DeploymentDetails API spec has changed. Now, the `failure` and `success` fields are required. The values can be an empty array or an array of UUIDs of NGINX instances. + + Endpoint: `/systems/instances/deployments/{deploymentUid}` + + Example JSON Response + + ```json + { + "createTime": "2022-04-18T23:09:16Z", + "details": { + "failure": [ ], + "success": [ + { + "name": "27de7cb8-f7d6-3639-b2a5-b7f48883aee1" + } + ] + }, + "id": "07c6101e-27c9-4dbb-b934-b5ed75e389e0", + "status": "finalized", + "updateTime": "2022-04-18T23:09:16Z" + } + ``` + + +### Resolved Issues{#2-1-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Unable to register multiple NGINX Agents in containers on the same host [(30780)]({{< relref "/nim/releases/known-issues.md#30780" >}}) +- {{% icon-resolved %}} Include cycles in the configuration cause analyzer to spin. 
[(31025)]({{< relref "/nim/releases/known-issues.md#31025" >}}) +- {{% icon-resolved %}} System reports "error granting scope: forbidden" if user granting permissions belongs to more than one role [(31215)]({{< relref "/nim/releases/known-issues.md#31215" >}}) +- {{% icon-resolved %}} When using Instance Groups, tag-based access controls are not enforced [(31267)]({{< relref "/nim/releases/known-issues.md#31267" >}}) +- {{% icon-resolved %}} Bad Gateway (502) errors with Red Hat 7 [(31277)]({{< relref "/nim/releases/known-issues.md#31277" >}}) + +### Known Issues{#2-1-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.0.1 + +January 27, 2022 + +### Upgrade Paths {#2-0-1-upgrade-paths} + +Instance Manager supports upgrades from these previous versions: + +- 2.0.0 + +If your Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +### Resolved Issues{#2-0-1-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Unable to access the NGINX Instance Manager web interface after loading SELinux policy [(31583)]({{< relref "/nim/releases/known-issues.md#31583" >}}) +- {{% icon-resolved %}} The `nms-dpm` service restarts when registering multiple NGINX Agents with the same identity [(31612)]({{< relref "/nim/releases/known-issues.md#31612" >}}) + +### Known Issues{#2-0-1-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic. + +--- + +## 2.0.0 + +December 21, 2021 +### What's New{#2-0-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **(Experimental) Share a configuration across multiple instances** + + With a feature called **Instance Groups**, you can share the same configuration across multiple instances. 
So, if your website requires a number of instances to support the load, you can publish the same configuration to each instance with ease.
+
+- {{% icon-feature %}} **More metrics and instance dashboards**
+
+  Instance Manager now collects additional metrics from the NGINX instances. We also added pre-configured dashboards to the web interface for each NGINX instance managed by Instance Manager. See the [Catalog Reference]({{< relref "/nms/reference/catalogs/_index.md" >}}) documentation for a complete list of metrics.
+
+- {{% icon-feature %}} **New architecture!**
+
+  We redesigned and improved the architecture of Instance Manager!
+
+- {{% icon-feature %}} **Improved user access control**
+
+  Instance Manager 2.x allows you to create user access controls with tags. Administrators can grant users read or write access to perform instance management tasks. And admins can grant or restrict access to the Settings options, such as managing licenses and creating users and roles. See the [Set up Authentication]({{< relref "/nim/admin-guide/authentication/basic-auth/set-up-basic-authentication.md#rbac" >}}) guide for more details.
+
+
+### Known Issues{#2-0-0-known-issues}
+
+You can find information about known issues in the [Known Issues]({{< relref "/nim/releases/known-issues.md" >}}) topic.
+ diff --git a/content/nim/system-configuration/_index.md b/content/nim/system-configuration/_index.md new file mode 100644 index 000000000..4be720de0 --- /dev/null +++ b/content/nim/system-configuration/_index.md @@ -0,0 +1,6 @@ +--- +title: System configuration +weight: 50 +url: /nginx-instance-manager/system-configuration/ + +--- diff --git a/content/nim/system-configuration/configure-clickhouse.md b/content/nim/system-configuration/configure-clickhouse.md new file mode 100644 index 000000000..e7ed7e33c --- /dev/null +++ b/content/nim/system-configuration/configure-clickhouse.md @@ -0,0 +1,93 @@ +--- +description: '' +docs: DOCS-998 +doctypes: +- task +tags: +- docs +title: Configure ClickHouse +toc: true +weight: 100 +--- + +## Overview + +Follow the steps in this guide to update the `/etc/nms/nms.conf` file if you used a custom address, username, or password or enabled TLS when installing ClickHouse. F5 NGINX Management Suite requires this information to connect to ClickHouse. + +## Default ClickHouse Values {#default-values} + +Unless specified differently in the `/etc/nms/nms.conf` file, NGINX Management Suite uses the following values for ClickHouse by default: + +{{< include "installation/clickhouse-defaults.md" >}} + +--- + +## Use Custom Address, Username, Password + +If your ClickHouse installation has a different address, username, or password than the default values, you need to configure NGINX Management Suite to connect to ClickHouse. + +To set custom values for the ClickHouse address, username, and password: + +1. On the NGINX Management Suite server, open the `/etc/nms/nms.conf` file for editing. +2. Change the following settings to match your ClickHouse configuration: + + ``` yaml + clickhouse: + address: tcp://localhost:9000 + username: + password: + ``` + +3. Save the changes and close the file. 
+
+---
+
+## Configure TLS
+
+If you configured ClickHouse to work with TLS, take the following steps to update the settings in the NGINX Management Suite `nms.conf` file:
+
+1. On the NGINX Management Suite server, open the `/etc/nms/nms.conf` file for editing.
+2. Configure the `clickhouse` TLS settings to suit your environment:
+
+    ```yaml
+    clickhouse:
+
+      # Sets the log level for ClickHouse processes within NMS.
+      log_level: debug
+
+      # Sets the address that will be used to connect to ClickHouse.
+      address: 127.0.0.1:9001
+
+      ## Note: Username and password should only be set, if you have custom defined username and password for ClickHouse.
+      ## Ensure that any configured username or password is wrapped in single quotes.
+      # Sets the username that will be used to connect to ClickHouse.
+      username: 'test-1'
+
+      # Sets the password that will be used to connect to ClickHouse.
+      password: 'test-2'
+
+      # Activates or deactivates TLS for connecting to ClickHouse.
+      # Note: `tls_mode` will be deprecated in the future, use the `tls` key to enable TLS connection for ClickHouse.
+      tls_mode: true
+
+      tls:
+        # Sets the address used to connect to ClickHouse with a TLS connection.
+        address: 127.0.0.1:9441
+
+        # Activates or deactivates TLS verification of ClickHouse connection.
+        skip_verify: false
+
+        # Sets the path of the certificate used for TLS connections in PEM encoded format.
+        cert_path: /etc/certs
+
+        # Sets the path of the client key used for TLS connections in PEM encoded format.
+        key_path: /etc/key
+
+        # Sets the path of the Certificate Authority installed on the system for verifying certificates.
+        cert_ca: /etc/ca
+
+        # Sets directory containing ClickHouse migration files.
+        migrations_path: /test/migrations
+    ```
+
+3. Save the changes and close the file.
diff --git a/content/nim/system-configuration/configure-gateway.md b/content/nim/system-configuration/configure-gateway.md new file mode 100644 index 000000000..ae8c98f58 --- /dev/null +++ b/content/nim/system-configuration/configure-gateway.md @@ -0,0 +1,59 @@ +--- +description: Follow the steps in this guide to fine-tune the NGINX proxy gateway for + F5 NGINX Management Suite to support large data planes running numerous NGINX Agents. +docs: DOCS-1131 +doctypes: +- tutorial +tags: +- docs +title: Optimize NGINX Proxy Gateway for Large Data Planes +toc: true +weight: 400 +--- + +## Overview + +If the NGINX proxy gateway for F5 NGINX Management Suite alerts you that there are not enough worker connections, you may need to modify the NGINX configuration (`/etc/nginx/nginx.conf` on the NGINX Management Suite host) to allow more worker connections and increase the number of file descriptors for worker processes. + +## Configure Worker Connections + +By default, the NGINX Management Suite's NGINX configuration (`/etc/nginx/nginx.conf`) allows 1024 worker connections (`worker_connections`). However, this default may be insufficient if you have a large data plane with numerous NGINX Agents. To ensure optimal performance, we suggest allowing **twice as many worker connections as the number of NGINX Agents** you want to support. This is because each NGINX Agent requires two persistent gRPC connections to the NGINX Management Suite. If you have 1,000 NGINX Agents, for example, you should allow around 2,000 worker connections. + +You may also want to adjust the maximum number of file descriptors (`worker_rlimit_nofile`) that a process can open to align with the number of worker connections. Note that `rlimit_nofile` is a system setting, so make sure to check the user limits for your Linux distribution, as these may be more restrictive. 
+
+To update the number of worker connections and file descriptors, edit the NGINX configuration file (`/etc/nginx/nginx.conf`) on the NGINX Management Suite host. For more information on the NGINX worker connection and file descriptor settings, refer to the following NGINX Core topics:
+
+- [NGINX worker_connections](http://nginx.org/en/docs/ngx_core_module.html#worker_connections)
+- [NGINX worker_rlimit_nofile](http://nginx.org/en/docs/ngx_core_module.html#worker_rlimit_nofile)
+
+## Configure GRPC for Agents
+
+By default, the NGINX Management Suite's NGINX configuration (`/etc/nginx/conf.d/nms-http.conf`) times out the gRPC connection from Agents at 10 minutes using the client body timeout (`client_body_timeout`). You can adjust this value to suit your needs; a lower value will time out gRPC connections from aborted Agent connections faster. An aborted Agent connection can happen when the Agent disconnects unexpectedly from the network without going through the gRPC protocol teardown process.
+
+To update the timeout value, edit the NGINX Management Suite's NGINX configuration file (`/etc/nginx/conf.d/nms-http.conf`) on the NGINX Management Suite host.
For example: + +```nginx + # gRPC service for metric ingestion + location /f5.nginx.agent.sdk.MetricsService { + # uncomment to enable mTLS with agent + # auth_request /check-agent-client-cert; + include /etc/nms/nginx/errors-grpc.loc_conf; + grpc_socket_keepalive on; + grpc_read_timeout 5m; + grpc_send_timeout 5m; + client_body_timeout 10m; + grpc_pass grpc://ingestion-grpc-service; + } + + # gRPC service for DPM + location /f5.nginx.agent.sdk.Commander { + # uncomment to enable mTLS with agent + # auth_request /check-agent-client-cert; + include /etc/nms/nginx/errors-grpc.loc_conf; + grpc_socket_keepalive on; + grpc_read_timeout 5m; + grpc_send_timeout 5m; + client_body_timeout 10m; + grpc_pass grpc://dpm-grpc-service; + } +``` diff --git a/content/nim/system-configuration/configure-selinux.md b/content/nim/system-configuration/configure-selinux.md new file mode 100644 index 000000000..5881762e4 --- /dev/null +++ b/content/nim/system-configuration/configure-selinux.md @@ -0,0 +1,170 @@ +--- +description: Learn how to load the provided F5 NGINX Management Suite SELinux policy + to secure your NGINX Management Suite deployment. +docs: DOCS-796 +doctypes: +- task +tags: +- docs +title: Configure SELinux +toc: true +weight: 250 +--- + +{{< shortversions "2.0.0" "latest" "nimvers" >}} + +## Overview + +You can use the optional SELinux policy module included in the package to secure F5 NGINX Management Suite operations with flexible, mandatory access control that follows the principle of least privilege. + +The scope of the SELinux policy allows NGINX Management Suite to perform all operations needed to support the default configuration. This includes inter-process communication on the default Unix sockets and TCP as an alternative. Other changes may require manual adjustments to the default policy for the application to work. + +{{< important >}}The attached SELinux policy module is optional. 
As such, the module is not loaded automatically during installation even on SELinux-enabled systems. You must manually load the policy module as detailed in the following steps.{{< /important >}} + +--- + +## Before You Begin + +To complete this tutorial, take the following preparatory steps: + +1. Enable SELinux on your system. +2. Install the following tools: `load_policy`, `semodule`, and `restorecon`. +3. [Install NGINX Management Suite]({{< relref "/nim/deploy/_index.md" >}}) with SELinux module files in place. + +{{< important >}}SELinux can be configured to use `permissive` mode. In `permissive` mode, policy violations are logged instead of enforced. Make sure you know which mode your SELinux configuration uses.{{< /important >}} + +--- + +## Install NGINX Management Suite Policy {#selinux-install} + +The NGINX Management Suite installer places the SELinux policy files in the following locations: + +- `/usr/share/selinux/packages/nms.pp` - loadable binary policy module +- `/usr/share/selinux/devel/include/contrib/nms.if` - interface definitions file +- `/usr/share/man/man8/nms_selinux.8.gz` - policy man page + +You can interact with these files to learn about the policy. See the following section for steps on how to load the policy. + +### Load Policy and Set Default Labels {#selinux-server} + +To use the SELinux policy that's included with NGINX Management Suite, take the following steps: + +1. Load the NGINX Management Suite policy: + + ```bash + sudo semodule -n -i /usr/share/selinux/packages/nms.pp + sudo /usr/sbin/load_policy + ``` + +1. 
Run the following commands to restore the default SELinux labels for the files and directories related to NGINX Management Suite:
+
+    ```bash
+    sudo restorecon -F -R /usr/bin/nms-core
+    sudo restorecon -F -R /usr/bin/nms-dpm
+    sudo restorecon -F -R /usr/bin/nms-ingestion
+    sudo restorecon -F -R /usr/bin/nms-integrations
+    sudo restorecon -F -R /usr/lib/systemd/system/nms.service
+    sudo restorecon -F -R /usr/lib/systemd/system/nms-core.service
+    sudo restorecon -F -R /usr/lib/systemd/system/nms-dpm.service
+    sudo restorecon -F -R /usr/lib/systemd/system/nms-ingestion.service
+    sudo restorecon -F -R /usr/lib/systemd/system/nms-integrations.service
+    sudo restorecon -F -R /var/lib/nms/modules/manager.json
+    sudo restorecon -F -R /var/lib/nms/modules.json
+    sudo restorecon -F -R /var/lib/nms/streaming
+    sudo restorecon -F -R /var/lib/nms
+    sudo restorecon -F -R /var/lib/nms/dqlite
+    sudo restorecon -F -R /var/run/nms
+    sudo restorecon -F -R /var/lib/nms/modules
+    sudo restorecon -F -R /var/log/nms
+    ```
+
+1. Restart the NGINX Management Suite services:
+
+    ```bash
+    sudo systemctl restart nms
+    ```
+
+### Add Ports to SELinux Context {#selinux-ports-add}
+
+NGINX Management Suite uses the `nms_t` context in the policy module. The following example shows how to add a new port to the context. You should add external ports to the firewall exception list. Note, as a system admin, you're responsible for any custom configurations that differ from the default policy.
+
+To add TCP ports `10000` and `11000` to the `nms_t` context, run the following commands:
+
+```bash
+sudo semanage port -a -t nms_port_t -p tcp 10000
+sudo semanage port -a -t nms_port_t -p tcp 11000
+```
+
+If you've already defined the port context, use `-m`:
+
+```bash
+sudo semanage port -m -t nms_port_t -p tcp 10000
+sudo semanage port -m -t nms_port_t -p tcp 11000
+```
+
+Verify the port has the correct label by running the following `seinfo --portcon` commands:
+
+``` bash
+$ seinfo --portcon=10000
+
+Portcon: 4
+   portcon sctp 1024-65535 system_u:object_r:unreserved_port_t:s0
+   portcon tcp 10000 system_u:object_r:nms_port_t:s0
+   portcon tcp 1024-32767 system_u:object_r:unreserved_port_t:s0
+   portcon udp 1024-32767 system_u:object_r:unreserved_port_t:s0
+
+$ seinfo --portcon=11000
+
+Portcon: 4
+   portcon sctp 1024-65535 system_u:object_r:unreserved_port_t:s0
+   portcon tcp 1024-32767 system_u:object_r:unreserved_port_t:s0
+   portcon tcp 11000 system_u:object_r:nms_port_t:s0
+   portcon udp 1024-32767 system_u:object_r:unreserved_port_t:s0
+```
+
+### Remove Ports from SELinux Context {#selinux-ports-remove}
+
+If you uninstall NGINX Management Suite, you should remove the ports. To do this, run the following commands:
+
+```bash
+sudo semanage port -d -t nms_port_t -p tcp 10000
+sudo semanage port -d -t nms_port_t -p tcp 11000
+```
+
+---
+
+## Enabling SELinux for NGINX Agent {#selinux-agent}
+
+The following SELinux files are added when installing the NGINX Agent package:
+
+- `/usr/share/selinux/packages/nginx_agent.pp` - loadable binary policy module
+- `/usr/share/selinux/devel/include/contrib/nginx_agent.if` - interface definitions file
+- `/usr/share/man/man8/nginx_agent_selinux.8.gz` - policy man page
+
+To load the NGINX Agent policy, run the following commands:
+
+{{< include "installation/agent-selinux.md" >}}
+
+### Add Ports to NGINX Agent SELinux Context
+
+You can configure the NGINX Agent to work with SELinux.
Make sure you add external ports to the firewall exception list. + +The following example shows how to allow external ports outside the HTTPD context. You may need to enable NGINX to connect to these ports. + +```bash +sudo setsebool -P httpd_can_network_connect 1 +``` + +{{}}For additional information on using NGINX with SELinux, refer to the guide [Using NGINX and NGINX Plus with SELinux](https://www.nginx.com/blog/using-nginx-plus-with-selinux/).{{}} + +--- + +## Recommended Resources + +- +- +- +- +- +- +- diff --git a/content/nim/system-configuration/configure-telemetry.md b/content/nim/system-configuration/configure-telemetry.md new file mode 100644 index 000000000..e39b50d8f --- /dev/null +++ b/content/nim/system-configuration/configure-telemetry.md @@ -0,0 +1,80 @@ +--- +docs: DOCS-1269 +doctypes: +- task +tags: +- docs +title: Configure Telemetry and Web Analytics +toc: true +weight: 260 +--- + +## Overview + +The F5 NGINX Management Suite platform lets you share telemetry and web analytics data with F5 NGINX. This data provides valuable insights into software usage and adoption, which NGINX uses to improve product development and support customers worldwide. This document provides an overview of the transmitted data, instructions for enabling or disabling the features, and instructions for configuring firewalls. + +## Telemetry + +NGINX Management Suite sends a limited set of telemetry data to NGINX for analysis. This data is associated only with the subscription ID from the applied license and does not include any personally identifiable information or specific details about the management plane, data plane, or other details. + +The purpose of collecting this telemetry data is to: + +- Drive and validate product development decisions to ensure optimal outcomes for users. +- Assist the Customer Success and Support teams in helping users achieve their goals. 
+- Fulfill Flexible Consumption Program reporting requirements by automatically reporting product usage to F5 NGINX, saving users time. + +By sharing this telemetry data, we can improve NGINX Management Suite and provide better support to users. + +### Captured Telemetry Data Points + +The table below shows the captured telemetry data points, the trigger conditions, and their respective purposes. Additional data points may be added in the future. + +{{}} + +|
    Data Point
    | Triggering Event | Purpose | +|--------------------------|------------------------------------|-------| +| Installation | The first time NGINX Management Suite processes are started. | To measure the time it takes to install and start using NGINX Management Suite. | +| Login | When a user logs in to NGINX Management Suite. No data about the user is sent, only the fact that a user successfully authenticated and the timestamp of the login event. | To understand how often users or systems access NGINX Management Suite. | +| Start/Stop processes | When any NGINX Management Suite processes are started or stopped. | To gauge how often users upgrade NGINX Management Suite or troubleshoot issues. This information helps F5 Support diagnose issues. | +| Adding Data Plane(s) | When NGINX Agent registers with NGINX Management Suite for the first time. No data about the data plane is sent, just that an NGINX Agent registered with the platform. | To understand the frequency and quantity of data planes being added to NGINX Management Suite. This information helps inform our scale and performance targets and helps F5 Support diagnose issues. | +| Product Usage | Data is sent daily or when Send Usage is selected from the Licenses page in the web interface or initiated using the API. (Requires a [JWT license]({{< relref "/nim/admin-guide/license/add-license.md#jwt-license" >}}).) | To track and report commercial usage in accordance with entitlement and Flexible Consumption Program (FCP) requirements. | + +{{
    }} + +### Enabling and Disabling Telemetry + +Once you [apply a valid license to NGINX Management Suite]({{< relref "/nim/admin-guide/license/add-license.md" >}}), the platform will automatically try to send the specified telemetry data points to F5 NGINX. It may also include data points captured shortly before the license was applied. For example, if you install NGINX Management Suite and immediately apply the license, the *Installation* data point will be sent. + +#### Disable Telemetry + +You can disable telemetry sharing at any time by going to the NGINX Management Suite web interface and selecting **Settings > License**. You can also disable the feature using the `/license` API endpoint. You can re-enable telemetry from the same locations if you change your mind. + +### Firewall Settings for Telemetry + +To support telemetry for the NGINX Management Suite, allow outbound TCP connections in your firewall to 159.60.126.0/25 on port 443. + +If you are using a JWT license, make sure to allow inbound and outbound access on port 443 to the following URLs: + +- [https://product.apis.f5.com](https://product.apis.f5.com) +- [https://product-s.apis.f5.com/ee](https://product-s.apis.f5.com/ee) + +--- + +## Web Analytics + +Web analytics are collected when users interact with the platform through their web browsers. This data is sent directly from the users' browsers to NGINX and is used to understand user interaction patterns and improve the user experience. + +### Enabling and Disabling Web Analytics + +#### Opt Out of Web Analytics During Provisioning + +During provisioning or upgrade, administrators will see a notice about web analytics collection with an option to opt out. This notice includes a link to F5’s official [Privacy Notice](https://www.f5.com/company/policies/privacy-notice). Administrators can opt out by selecting the provided link. + +#### Disable Web Analytics + +If administrators miss the initial opt-out message, they can follow these steps:: + +1. 
Select the **User** icon in the top-right corner of the screen. +2. Select **"Collect interaction data (all users)"** to turn the setting off. + +{{}}The admin user’s decision to opt in or out of web analytics affects all users on the platform.{{}} diff --git a/content/nim/system-configuration/configure-vault.md b/content/nim/system-configuration/configure-vault.md new file mode 100644 index 000000000..4d41c0283 --- /dev/null +++ b/content/nim/system-configuration/configure-vault.md @@ -0,0 +1,180 @@ +--- +description: Follow the steps in this guide to configure F5 NGINX Management Suite to + use HashiCorp's Vault for storing secrets. +docs: DOCS-999 +doctypes: +- tutorial +tags: +- docs +title: Configure Vault for Storing Secrets +toc: true +weight: 200 +--- + +{{< shortversions "2.6.0" "latest" "nimvers" >}} + +HashiCorp's Vault is a popular solution for storing secrets. While F5 NGINX Management Suite provides encryption-at-rest for secrets stored on disk, if you have an existing Vault installation, you may prefer to store all secrets in one place. NGINX Management Suite provides a driver you can use to connect to existing Vault installations and store secrets. + +## Before You Begin + +To complete the steps in this guide, you need the following: + +- A working understanding of [Vault](https://www.vaultproject.io) and its operations +- A running version of [Vault 1.8.8 or later](https://www.vaultproject.io/docs/install) + +--- + +## Create Periodic Service Tokens {#create-periodic-service-tokens} + +Access to a vault requires a renewable token. + +{{}}If you attempt to use the vault's Root Token, NGINX Management Suite will not be able to start the secrets driver, as that token is not renewable.{{}} + +To create a periodic service token for NGINX Management Suite, take the following steps: + +1. Use the Vault user interface to create a new policy. + + The "default" policy has no access to store or retrieve secrets, and the root policy is too broad. 
We recommend creating a policy called `nms_secrets` with these capabilities: + + ```text + path "secret/*" { + capabilities = ["create", "read", "update", "delete", "list"] + } + ``` + +2. Create an initial service token that will last so long as it's renewed on time until it's manually revoked. We recommend a period of 24 hours, which is used in the following example. NGINX Management Suite will always attempt to renew tokens before expiring, so shorter times also work. + + To create a token, take the following step, substituting your vault's Root Token for `$VAULT_ROOT_TOKEN` and your vault's address for `$VAULT_ADDR`: + + ```bash + curl -X POST --header "X-Vault-Token: $VAULT_ROOT_TOKEN" \ + --data '{"policies": "nms_secrets", "period": "24h"}' \ + $VAULT_ADDR/v1/auth/token/create | jq -r ".auth.client_token" > periodic_token.txt + ``` + +3. Verify your token works: + + ```bash + curl --header "X-Vault-Token: $(cat periodic_token.txt)" \ + $VAULT_ADDR/v1/auth/token/lookup-self | jq .data + ``` + +4. If everything looks good, stop the `nms-core` service and configure NGINX Management Suite to use your token: + + ```bash + sudo systemctl stop nms-core + sudo NMS_VAULT_TOKEN=$(cat periodic_token.txt) nms-core secret vault-token + sudo systemctl restart nms-core + ``` + +--- + +## Start Using Vault to Store Secrets {#start-using-vault} + +1. To use the new token on the NGINX Management Suite server, open the `/etc/nms/nms.conf` file for editing. + +2. Update the `secrets` section indented under `core` to tell NGINX Management Suite how to handle secrets. 
+ + For example, an internal development installation of Vault might use: + + ```text + secrets: + # change driver to "local" if you want to stop using vault + driver: vault + config: + # local file path for stored secrets when using the local driver + path: /var/lib/nms/secrets + # key_file is required for local driver + key_file: /var/lib/nms/secrets/key + # vault address for when using the vault driver + address: http://127.0.0.1:8200/v1 + # isolation is used to store secrets in a specific namespace and prefix to better restrict access rights + # on the local file system or shared vault server. + isolation: + namespace: secret + prefix: secureString + ``` + +3. Save the changes and close the file. +4. Restart the NGINX Management Suite services to start using Vault to store secrets: + + ```bash + sudo systemctl restart nms + ``` + +--- + +## Switching between Vault and Local Encryption {#switch-to-from-vault} + +After you've set up Vault to store your secrets, you can easily switch between local encryption and Vault as desired. + +### Switch to Local Encryption + +To switch from using Vault to local disk encryption, take the following steps: + +1. Stop NGINX Management Suite to ensure exclusive access to the secrets: + + ```bash + sudo systemctl stop nms + ``` + +2. Run the following command to migrate secrets from Vault to your local disk: + + ```bash + sudo nms-core secret migrate-secrets-to-local + ``` + +3. Update the `core/secrets/driver` line from `/etc/nms/nms.conf`, which you added in the previous section, to say `driver: local`. + +4. Restart NGINX Management Suite: + + ```bash + sudo systemctl start nms + ``` + +### Switch to Vault + +To switch from using local encryption back to Vault, take the following steps: + +1. Stop NGINX Management Suite to ensure exclusive access to the secrets: + + ```bash + sudo systemctl stop nms + ``` + +2. 
Run the following command to migrate secrets from your local disk to Vault: + + ```bash + sudo nms-core secret migrate-secrets-to-vault + ``` + +3. Update the `core/secrets/driver` line from `/etc/nms/nms.conf`, which you added in the previous section, to say `driver: vault`. + +4. Restart NGINX Management Suite: + + ```bash + sudo systemctl start nms + ``` + +--- + +## Troubleshooting + +
+<summary>Token has expired</summary>
+
+If the vault service token is manually revoked or expires before renewal -- possibly because NGINX Management Suite was shut down and was
+unavailable to renew the token -- all access to stored secrets will fail.
+
+To resolve this problem, you need to supply a new service token using `nms-core secret vault-token`. See the [Create Periodic Service Tokens](#create-periodic-service-tokens) section above for details on generating and supplying a new token.
+</details>
    + +
    + +
    +Certs are missing after switching to/from Vault + +When configuring Vault for storing secrets, NGINX Management Suite assumes that no secrets have been stored previously and won't migrate any existing stored secrets. All existing certs must be uploaded again. + +Stored secrets are not deleted: secrets remain in the encrypted disk storage or vault. We can't guarantee that the secrets will remain accessible forever. If you want to recover the missing secrets, you can [switch back to the other method for storing secrets](#switch-to-from-vault) following the instructions above. Then restart NGINX Management Suite to see the old secrets again. +
    diff --git a/content/nim/system-configuration/configure-with-config.md b/content/nim/system-configuration/configure-with-config.md new file mode 100644 index 000000000..0ae81bf6a --- /dev/null +++ b/content/nim/system-configuration/configure-with-config.md @@ -0,0 +1,195 @@ +--- +description: +docs: DOCS-1100 +doctypes: +- task +tags: +- docs +title: Configure NGINX Management Suite with nms.conf +toc: true +weight: 1 +--- + + +## Overview + +This guide explains how to configure F5 NGINX Management Suite by editing the **/etc/nms/nms.conf** file. + +## Before You Start + +Before you set up NGINX Management Suite, ensure: + +- You have access to the **/etc/nms/nms.conf** file on the host where NGINX Management Suite is installed. +- You understand the required settings and options. +- You have the necessary permissions to edit the configuration file. + +## Configuration Details + +Edit the **/etc/nms/nms.conf** file to configure NGINX Management Suite. The comments in the example configuration file provide details on each setting and its usage. + +
    + Example nms.conf with default settings and values + +```yaml +# This is the default /etc/nms/nms.conf file distributed with Linux packages. + +user: nms +daemon: true +# Root dqlite db directory. Each subdirectory here is dedicated to the process +db_root_dir: /var/lib/nms/dqlite + +# Default log level for all processes. Each process can override this level. +log: + encoding: console + level: error + +modules: + prefix: /var/lib/nms + # NMS modules config are available here to be read if installed + conf_dir: /etc/nms/modules + +core: + # Enable this for core on TCP + # address: 127.0.0.1:8033 + address: unix:/var/run/nms/core.sock + grpc_addr: unix:/var/run/nms/coregrpc.sock + analytics: + # Catalogs config + catalogs: + metrics_data_dir: /usr/share/nms/catalogs/metrics + events_data_dir: /usr/share/nms/catalogs/events + dimensions_data_dir: /usr/share/nms/catalogs/dimensions + # Dqlite config + dqlite: + addr: 127.0.0.1:7891 + # Disable this to prevent automatic cleanup on a module removal of its RBAC features and permissions + disable_rbac_cleanup: false + +dpm: + # Enable this for dpm on TCP + # address: 127.0.0.1:8034 + address: unix:/var/run/nms/dpm.sock + # Enable this for dpm gRPC server on TCP + # grpc_addr: 127.0.0.1:8036 + grpc_addr: unix:/var/run/nms/am.sock + # Dqlite config + dqlite: + addr: 127.0.0.1:7890 + # WATCHDOG configurations + # Enable this setting to specify how often, in seconds, messages are sent to the watchdog. + # The default interval is 2 seconds + reporting_period: 2s + # Enable this setting to specify how often, in seconds, the system checks in with the watchdog timer to reset. + # The default interval is 5 seconds + check_period: 5s + # Enable this setting to specify the maximum allowable time for the system to operate without resetting the watchdog. 
+ # The default interval is 30 seconds + threshold_duration: 30s + # Enable this setting to specify how often, in seconds, performance statistics are collected and analyzed by the watchdog. + # The default interval is 30 seconds + stats_period: 30s + # Enable this setting to specify the maximum amount of time allowed for a deployment process to complete. + # The default interval is 10 minutes + deployment_timeout: 10m + # NATS config + nats: + address: nats://127.0.0.1:9100 + # NATS streaming + store_root_dir: /var/lib/nms/streaming + # 10GB + max_store_bytes: 10737418240 + # 1GB + max_memory_bytes: 1073741824 + # https://docs.nats.io/reference/faq#is-there-a-message-size-limitation-in-nats + # 8MB + max_message_bytes: 8388608 + # ClickHouse schema migration check interval + clickhouse_migration_interval: 100s + # Enable this setting to specify how often, in hours, offline agents are pruned from the system + # The default interval is 72 hours + agent_prune_duration: 72h + # Enable this setting to specify how often, in hours, offline container agents are pruned from the system + # The default interval is 12 hours + agent_container_prune_duration: 12h + +integrations: + # Enable this for integrations on TCP + # address: 127.0.0.1:8037 + address: unix:/var/run/nms/integrations.sock + # Dqlite config + dqlite: + addr: 127.0.0.1:7892 + app_protect_security_update: + # Enable this setting to automatically retrieve the latest Attack Signatures and Threat Campaigns. + enable: true + # Enable this setting to specify how often, in hours, the latest Attack Signatures and Threat Campaigns are retrieved. + # The default interval is 6 hours, the maximum interval is 48 hours, and the minimum is 1 hour. + interval: 6 + # Enable this setting to specify how many updates to download for the latest Attack Signatures and Threat Campaigns. + # By default, the 10 latest updates are downloaded. The maximum value is 20, and the minimum value is 1. 
+ number_of_updates: 10 + policy_manager: + # Time to live for attack signatures. If the attack signatures exceed their TTL and are not deployed to an instance or + # instance group, they will be deleted from the database. Duration unit can be seconds (s), minutes (m), or hours (h). + attack_signatures_ttl: 336h + # Time to live for compiled bundles, this includes compiled security policies and compiled log profiles. If a compiled + # bundle exceeds its TTL and is not deployed to an instance or instance group, it will be deleted from the database. Note + # that the compiled bundle is deleted, not the definition of it (i.e., the security policy or log profile definition). + # Duration unit can be seconds (s), minutes (m), or hours (h). + compiled_bundles_ttl: 336h + # Time to live for threat campaigns. If the threat campaigns exceed their TTL and are not deployed to an instance or + # instance group, they will be deleted from the database. Duration unit can be seconds (s), minutes (m), or hours (h). + threat_campaigns_ttl: 1440h + license: + db: + addr: 127.0.0.1:7893 + +ingestion: + # Enable this for ingestion gRPC server on TCP + # grpc_addr: 127.0.0.1:8035 + grpc_addr: unix:/var/run/nms/ingestion.sock + # Parameters for ingesting metrics and events + sink: + # All limits are inclusive on both ends of the bound. 
+ # Buffer_size limits: 2,000 - 1,000,000 + buffer_size: 20000 + buffer_flush_interval: 1m + buffer_check_interval: 1s + # Insert_connection_retries limits: -1 - 10 + insert_connection_retries: -1 + insert_connection_retry_interval: 5s + # Insert_timeout_retries limits: 2 - 10 + insert_timeout_retries: 3 + insert_timeout_retry_interval: 30s + transaction_timeout: 30s + # Concurrent_transactions limits: 2 - 20 + concurrent_transactions: 10 + +# ClickHouse config for establishing a ClickHouse connection +clickhouse: + # Below address not used if TLS mode is enabled + address: 127.0.0.1:9000 + # Ensure username and password are wrapped in quotes + # The default ClickHouse username on install is empty. If you've set up a custom user, set the username here + username: "" + # The default ClickHouse password on install is empty. If you've set a custom password, set the password here + password: "" + # The TTL configurations below define how long data for features will be retained in ClickHouse + # The default values can be updated for a custom retention period. Restart nms-dpm to apply any modifications to TTL + ttl_configs: + - feature: metrics + ttl: 32 # number of days + - feature: events + ttl: 120 # number of days + - feature: securityevents + ttl: 32 # number of days +# # Enable TLS configurations for ClickHouse connections +# tls: +# # Address pointing to of ClickHouse +# # Below CH address is used when TLS mode is active +# tls_address: 127.0.0.1:9440 +# # Verification should be skipped for self-signed certificates +# skip_verify: true +# key_path +``` +
    \ No newline at end of file diff --git a/content/nim/system-configuration/secure-traffic.md b/content/nim/system-configuration/secure-traffic.md new file mode 100644 index 000000000..dd291fcc0 --- /dev/null +++ b/content/nim/system-configuration/secure-traffic.md @@ -0,0 +1,535 @@ +--- +docs: DOCS-794 +doctypes: +- tutorial +tags: +- docs +title: Secure Client Access and Network Traffic +toc: true +weight: 600 +--- + +{{< include "nim/decoupling/note-legacy-nms-references.md" >}} + +## Overview + +This guide explains how to secure client connections to NGINX Instance Manager and protect traffic between NGINX Instance Manager and NGINX instances. + +With NGINX Plus R33, telemetry data must be reported to a usage reporting endpoint, such as NGINX Instance Manager. This data validates subscription entitlements and tracks usage metrics. This guide also covers how to use the [`ssl_verify`](#ssl_verify-and-usage-reporting-in-nginx-plus-r33) directive to secure telemetry reporting through certificate verification. + +{{< important >}}Never expose your management server to the public internet. The settings in this guide reduce risk, but they can't replace keeping your server inaccessible to unauthorized users.{{< /important >}} + +{{< call-out "tip" "See also:" "fa-solid fa-book" >}} +- To learn how to secure traffic for NGINX Agent, see [NGINX Agent TLS Settings](https://docs.nginx.com/nginx-agent/configuration/encrypt-communication/). +- For details on NGINX Plus entitlement and usage reporting, see [About subscription licenses]({{< relref "solutions/about-subscription-licenses.md" >}}).{{< /call-out >}} + +--- + +## NGINX Proxy SSL Termination + +SSL termination is the process where SSL-encrypted traffic is decrypted at the proxy, in this case, NGINX Instance Manager. Once decrypted, the traffic can be sent to its destination unencrypted or re-encrypted, depending on the configuration. 
+ +To secure traffic between NGINX Plus instances and NGINX Instance Manager, you must configure an SSL certificate and key in the NGINX configuration. This setup applies to both NGINX Open Source and NGINX Plus. For more details, see the [NGINX SSL Termination guide](https://docs.nginx.com/nginx/admin-guide/security-controls/terminating-ssl-http/). + +Starting with NGINX Plus R33, you must also enable `ssl_verify` to verify the SSL certificate used by NGINX Instance Manager when reporting telemetry data. See the section on [`ssl_verify` and usage reporting](#ssl_verify-and-usage-reporting-in-nginx-plus-r33) for more details. + +The example below shows how to set up SSL termination for NGINX Instance Manager: + +
    + /etc/nginx/conf.d/nms-http.conf + +```nginx +# Main external HTTPS server, needs port 443 +server { + listen 443 ssl; + http2 on; + root /var/www/nms; + + server_name _; + + ssl_protocols TLSv1.1 TLSv1.2; + ssl_ciphers HIGH:!aNULL:!MD5; + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + + ssl_certificate /etc/nms/certs/manager-server.pem; + ssl_certificate_key /etc/nms/certs/manager-server.key; + ssl_client_certificate /etc/nms/certs/ca.pem; +``` + +
    + +
    + +--- + +## Mutual Client Certificate Authentication Setup (mTLS) + +Mutual TLS (mTLS) is a security method that uses client certificates to verify both the server and the client during communication. This ensures that both NGINX Instance Manager and NGINX Plus instances are securely authenticated, protecting your network from unauthorized access. + +With mTLS, each NGINX instance has a unique client certificate that NGINX Instance Manager verifies before allowing communication. You can configure NGINX as a proxy to handle client certificates for this secure exchange. + +Follow these steps to set up mTLS using a Public Key Infrastructure (PKI) system: + +### Certificate Authority (CA) Setup + +1. **Create a private Certificate Authority (CA)**: + - If you're testing, you can generate the CA on the same machine as NGINX Instance Manager. + - For production environments, follow your organization's security standards (these often require generating CAs on secure, offline machines). + +2. **Set up root and intermediate CAs**: + - The root CA issues certificates to an intermediate CA. The intermediate CA, in turn, issues certificates for clients and servers. This layered setup adds extra security by ensuring that the root CA is only used for top-level tasks. + +3. **Issue Client and Server Certificates**: + - The intermediate CA signs the certificate signing requests (CSRs) and issues certificates to NGINX clients and NGINX Instance Manager. + +### Generate Certificates + +To generate the necessary certificates, follow these steps. You can modify these instructions to suit your specific environment. + +1. **Install OpenSSL** (if it isn't installed already). +2. **Create the certificate generation script**: + - Use the following example script to generate the certificates for your CA, server, and client. Save the script as `make_certs.sh`. + + +
    + make_certs.sh + + ```bash + #!/bin/bash + set -e + + make_ca() { + echo "Creating Self-Signed Root CA certificate and key" + openssl req \ + -new \ + -nodes \ + -x509 \ + -keyout ca.key \ + -out ca.crt \ + -config ca.cnf \ + -extensions v3_req \ + -days 1826 # 5 years + } + + make_int() { + echo "Creating Intermediate CA certificate and key" + openssl req \ + -new \ + -nodes \ + -keyout ca_int.key \ + -out ca_int.csr \ + -config ca-intermediate.cnf \ + -extensions v3_req + openssl req -in ca_int.csr -noout -verify + openssl x509 \ + -req \ + -CA ca.crt \ + -CAkey ca.key \ + -CAcreateserial \ + -in ca_int.csr \ + -out ca_int.crt \ + -extfile ca-intermediate.cnf \ + -extensions v3_req \ + -days 365 # 1 year + openssl verify -CAfile ca.crt ca_int.crt + echo "Creating CA chain" + cat ca_int.crt ca.crt > ca.pem + } + + make_server() { + echo "Creating nginx-manger certificate and key" + openssl req \ + -new \ + -nodes \ + -keyout server.key \ + -out server.csr \ + -config server.cnf + openssl req -in server.csr -noout -verify + openssl x509 \ + -req \ + -CA ca_int.crt \ + -CAkey ca_int.key \ + -CAcreateserial \ + -in server.csr \ + -out server.crt \ + -extfile server.cnf \ + -extensions v3_req \ + -days 365 # 1 year + openssl verify -CAfile ca.pem server.crt + } + + make_agent() { + echo "Creating Agent certificate and key" + openssl req \ + -new \ + -nodes \ + -keyout agent.key \ + -out agent.csr \ + -config agent.cnf + openssl req -in agent.csr -noout -verify + openssl x509 \ + -req \ + -CA ca.crt \ + -CAkey ca.key \ + -CAcreateserial \ + -in agent.csr \ + -out agent.crt \ + -extfile agent.cnf \ + -extensions v3_req \ + -days 365 # 1 year + openssl verify -CAfile ca.pem agent.crt + } + + # MAIN + make_ca + make_int + make_server + make_agent + ``` + +

    + +3. **Place the configuration files**: + - Put the following OpenSSL `.cnf` files in the same directory as the `make_certs.sh` script. These files are needed to configure the certificate authority and generate the appropriate certificates. + +
    + ca.cnf + + {{}} {{}} + + ``` yaml + [req] + default_bits = 4096 + distinguished_name = req_distinguished_name + prompt = no + default_md = sha256 + req_extensions = v3_req + + # recommend changing these to your needs + [req_distinguished_name] + countryName = US + stateOrProvinceName = California + localityName = San Francisco + organizationName = NGINX, Inc. + commonName = nms-ca + + [v3_req] + basicConstraints = critical, CA:true + keyUsage = critical, keyCertSign, cRLSign + subjectKeyIdentifier = hash + ``` + +
    + +
    + ca-intermediate.cnf + + ``` yaml + [req] + default_bits = 4096 + distinguished_name = req_distinguished_name + prompt = no + default_md = sha256 + req_extensions = v3_req + + # recommend changing these to your needs + [req_distinguished_name] + countryName = US + stateOrProvinceName = California + localityName = San Francisco + organizationName = NGINX, Inc. + commonName = nms-int-ca + + [v3_req] + basicConstraints = critical, CA:true + keyUsage = critical, keyCertSign, cRLSign + subjectKeyIdentifier = hash + ``` + +
    + +
    + server.cnf + + ``` yaml + [req] + prompt = no + default_bits = 4096 + x509_extensions = v3_req + req_extensions = v3_req + default_md = sha256 + distinguished_name = req_distinguished_name + + # recommend changing these to your needs + [req_distinguished_name] + countryName = US + stateOrProvinceName = California + localityName = San Francisco + organizationName = NGINX, Inc. + commonName = nginx-manager.example.com + + [v3_req] + basicConstraints = CA:FALSE + keyUsage = nonRepudiation, digitalSignature, keyEncipherment, keyAgreement + extendedKeyUsage = critical, serverAuth + subjectAltName = @alt_names + + # apply any DNS or IP SANs as needed + [alt_names] + DNS.1 = + IP.1 = + ``` + +
    + +
    + agent.cnf + + ``` yaml + [req] + prompt = no + default_bits = 2048 + x509_extensions = v3_req + req_extensions = v3_req + default_md = sha256 + distinguished_name = req_distinguished_name + + # recommend changing these to your needs + [req_distinguished_name] + countryName = US + stateOrProvinceName = California + localityName = San Francisco + organizationName = NGINX, Inc. + commonName = agent.example.com + + [v3_req] + basicConstraints = CA:FALSE + keyUsage = nonRepudiation, digitalSignature, keyEncipherment, keyAgreement + extendedKeyUsage = critical, clientAuth + ``` + +

    + +4. **Run the script**: + - After saving the script, make it executable and run it to generate the certificates. + + ```bash + sudo chmod +x ./make_certs.sh + sudo ./make_certs.sh + ``` + +5. **Copy the certificates to the NGINX instance**: + - Once generated, copy the ca.pem, agent.crt, and agent.key files to the NGINX instance where the NGINX Agent certificates will be installed. + + ```bash + sudo mkdir -p /etc/nms/certs + sudo cp ca.pem /etc/nms/certs/ + sudo cp agent.crt /etc/nms/certs/ + sudo cp agent.key /etc/nms/certs/ + ``` + +6. **Modify the NGINX Agent configuration**: + - Modify the `nginx-agent.conf` file to match the example below. The TLS options configure the NGINX Agent to use client certificate authentication with the NGINX proxy on NGINX Instance Manager. The `ca.pem` file is included as the certificate authority that the agent will use to verify NGINX Instance Manager’s server certificate. + - If the CA is trusted by the operating system, you can omit the ca option. + - Update the server host to match the NGINX Instance Manager address. + + {{< see-also >}}For additional information about TLS configurations for the NGINX Agent, refer to the [NGINX Agent TLS Settings](https://docs.nginx.com/nginx-agent/configuration/encrypt-communication/) topic. {{< /see-also >}} + +
    + /etc/nginx-agent/nginx-agent.conf + + ```yaml {hl_lines=[8,22,23,24,25]} + # + # /etc/nginx-agent/nginx-agent.conf + # + # Configuration file for NGINX Agent. + # + # This file is to track agent configuration values that are meant to be statically set. There + # are additional agent configuration values that are set via the API and agent install script + # which can be found in /var/lib/nginx-agent/agent-dynamic.conf. + + # specify the server grpc port to connect to + server: + # host of the control plane + host: + grpcPort: 443 + # provide servername overrides if using SNI + metrics: "nginx-manager.example.com" + command: "nginx-manager.example.com" + # tls options + tls: + enable: true + skip_verify: false + cert: /etc/nms/certs/agent.crt + key: /etc/nms/certs/agent.key + ca: /etc/nms/certs/ca.pem + log: + # set log level (panic, fatal, error, info, debug, trace; default "info") + level: info + # set log path. if empty, don't log to file. + path: /var/log/nginx-agent/ + # data plane status message / 'heartbeat' + nginx: + # path of NGINX logs to exclude + exclude_logs: "" + + dataplane: + sync: + enable: true + # poll interval for data plane status + status: + poll_interval: 30s + metrics: + # specify the size of a buffer to build before sending metrics + bulk_size: 20 + # specify metrics poll interval + report_interval: 1m + collection_interval: 15s + mode: aggregated + + # OSS NGINX default config path + # path to aux file dirs can also be added + config_dirs: "/etc/nginx:/usr/local/etc/nginx" + ``` + +
    + +7. Copy `ca.pem`, `server.crt`, and `server.key` to NGINX Instance Manager. + + ```bash + sudo cp ca.pem /etc/nms/certs/ + sudo cp server.crt /etc/nms/certs/ + sudo cp server.key /etc/nms/certs/ + ``` + +8. Add a new server to NGINX proxy for gRPC in the NGINX Instance Manager configuration with the newly generated certificates, then reload the service. + + - The `server_name` should match the `server.metrics` and `server.command` values in `nginx-agent.conf`. + - You can remove `MetricsService` and `Commander` locations from the existing server. + - The new server will enforce mTLS communication between NGINX Agent and NGINX Instance Manager, while the previous server can continue serving static content for the web interface and API without mTLS requirements. + + When `tls.skip_verify` is set to `false`, NGINX Agent verifies the server's certificate chain and hostname. Ensure the `server_name` in the configuration matches the Common Name (CN) or Subject Alternative Name (SAN) in the generated certificate. 
+ + ```nginx + # gRPC HTTPS server, needs port 443 + server { + listen 443 ssl; + http2 on; + root /var/www/nms; + + server_name nginx-instance-manager.example.com; + + ssl_protocols TLSv1.1 TLSv1.2; + ssl_ciphers HIGH:!aNULL:!MD5; + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + + ssl_certificate /etc/nms/certs/server.crt; + ssl_certificate_key /etc/nms/certs/server.key; + ssl_client_certificate /etc/nms/certs/ca.pem; + ssl_verify_client on; + + # gRPC service for metric ingestion + location /f5.nginx.agent.sdk.MetricsService { + include /etc/nms/nginx/errors-grpc.loc_conf; + grpc_socket_keepalive on; + grpc_read_timeout 5m; + grpc_send_timeout 5m; + client_body_timeout 10m; + grpc_pass grpc://ingestion-grpc-service; + } + + # gRPC service for DPM + location /f5.nginx.agent.sdk.Commander { + include /etc/nms/nginx/errors-grpc.loc_conf; + grpc_socket_keepalive on; + grpc_read_timeout 5m; + grpc_send_timeout 5m; + client_body_timeout 10m; + grpc_pass grpc://dpm-grpc-service; + } + ``` + +9. **Reload NGINX proxy configuration**: + - Apply the new settings by reloading NGINX proxy configuration. + + ```bash + sudo nginx -s reload + ``` + +10. **Restart NGINX Agent**: + - Start or restart NGINX Agent to apply the changes. + + ```bash + sudo systemctl restart nginx-agent + ``` + +--- + +## Configure SSL verification for usage reporting with self-signed certificates {#configure-ssl-verify} + +{{}} +Usage reporting for NGINX Plus R33 or later in network-restricted environments requires **NGINX Instance Manager version 2.18 or later**. +{{}} + +Starting with NGINX Plus R33, NGINX Plus must report usage data to a reporting endpoint, such as NGINX Instance Manager. For more information, see [About subscription licenses]({{< relref "solutions/about-subscription-licenses.md" >}}). 
+ +The [`ssl_verify`](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_verify) directive in the [`mgmt`](https://nginx.org/en/docs/ngx_mgmt_module.html) block ensures that NGINX Plus connects only to trusted reporting endpoints by validating the server's SSL certificate. The `ssl_verify` directive is set to `on` by default. + +### Why `ssl_verify` is important + +When `ssl_verify` is enabled: + +- NGINX Plus validates the SSL certificate presented by NGINX Instance Manager, ensuring it's from a trusted source. +- Secure telemetry transmission prevents man-in-the-middle (MITM) attacks. + +If the certificate is untrusted or invalid, telemetry reporting will fail. This failure can affect subscription validation and may prevent NGINX Plus from functioning properly. + +### Trusting self-signed certificates + +If NGINX Instance Manager uses a self-signed certificate, you must configure NGINX Plus to trust it explicitly. Use the [`ssl_trusted_certificate`](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_trusted_certificate) directive to specify a PEM-formatted file that contains the trusted CA certificate: + +```nginx +mgmt { + usage_report endpoint=; + ssl_verify on; + ssl_trusted_certificate ; + ssl_name manager-server; +} +``` + +Using self-signed certificates requires careful configuration to avoid connection issues or potential vulnerabilities. + +### Disabling `ssl_verify` + +Disabling `ssl_verify` bypasses SSL certificate verification, which reduces security and is **not recommended** for production environments. However, you can disable it in specific cases, such as testing environments or troubleshooting connectivity issues: + +```nginx +mgmt { + ssl_verify off; +} +``` + +--- + +## Troubleshooting + +If NGINX Agent and NGINX Instance Manager are having communication issues, follow these steps to troubleshoot: + +1. 
**Check access and error logs**: + - Make sure access and error logging are enabled to capture detailed information about errors and request processing. + - By default, both logs are enabled in the `http` block of the main NGINX configuration file: + + ```nginx + # nginx.conf + http { + ... + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log; + ... + } + ``` + +2. **Check the logs for certificate issues**: + - Review the logs for any errors related to certificates. Ensure the server is using the correct certificates and Certificate Authority (CA). diff --git a/content/nim/troubleshooting.md b/content/nim/troubleshooting.md new file mode 100644 index 000000000..71630f8da --- /dev/null +++ b/content/nim/troubleshooting.md @@ -0,0 +1,150 @@ +--- +description: This topic describes possible issues users might encounter when using + Instance Manager. When possible, suggested workarounds are provided. +docs: DOCS-1224 +doctypes: +- reference +tags: +- docs +title: Troubleshooting +toc: true +weight: 1000 +draft: true +--- + +## New NGINX instances don't show up in Instance Manager + +### Description + +After installing NGINX and the NGINX Agent on an instance, the instance is not returned when calling `GET https://hostname/api/platform/v1/systems`. + +### Resolution + +The NGINX service must be running **before** you start the NGINX Agent. + +- To resolve the issue, try restarting the NGINX Agent: + + ``` bash + sudo systemctl restart nginx-agent + ``` + +--- + +## (RHEL 8) NGINX doesn't start after upgrading NGINX OSS + +### Description + +In some cases, after upgrading NGINX OSS on RHEL 8, the NGINX service may not start and returns an error similar to the following: + +``` text +Job for nginx.service failed because the control process exited with error code. 
+``` + +The error log may include entries similar to the following example: + +``` text +2022/05/12 16:11:23 [emerg] 69688#69688: still could not bind() +2022/05/12 16:18:34 [emerg] 70092#70092: bind() to 0.0.0.0:80 failed (98: Address already in use) +``` + +### Resolution + +Ensure there isn't a process bound to port `80` or `443`. + +1. To stop processes bound to ports `80` and `443`, run the following commands: + + ```bash + sudo fuser -k 80/tcp + sudo fuser -k 443/tcp + ``` + +2. Restart the NGINX service: + + ```bash + sudo service nginx restart + ``` + +--- + +## Scan reports NGINX versions as `undefined` when NGINX App Protect is enabled + +### Description + +When [scanning for NGINX instances]({{< relref "/nim/nginx-instances/scan-instances" >}}), the NGINX version is reported as `undefined` when NGINX App Protect is installed. + +### Resolution + +This behavior is **by design**. As a security precaution when NGINX App Protect is installed, the NGINX server does not report its version in any HTTP headers. The **NGINX Plus** and **Instances** pages in the web interface will continue to report the NGINX and NGINX App Protect versions. + +--- + +## The NGINX Agent does not reconnect after a containerized Instance Manager with no persistent volumes is restarted + +### Description + +If Instance Manager is restarted without any persistent volumes configured, the NGINX Agent won't reconnect automatically. + +### Resolution + +When Instance Manager is restarted, its internal API gateway may be assigned a new IP address. + +To update the NGINX Agent's configuration with the new Instance Manager IP address, run the NGINX Agent with the `--server-host` CLI parameter or edit the `nginx-agent.conf` file. Using the `--server-host` CLI parameter will ensure that the setting persists across restarts. + +To learn more, refer to the [NGINX Agent documentation]({{< relref "/nms/nginx-agent/install-nginx-agent.md#nginx-agent-cli-flags-usage" >}}). 
+ +--- + +## "Public Key Not Available" error when upgrading Instance Manager on a Debian-based system + +### Description + +When attempting to upgrade Instance Manager on a Debian-based system, the command `sudo apt-get update` may return the error “public key is not available,” preventing the NGINX Agent from being updated. To resolve this issue, you need to update the public key first. + +### Workaround + +To manually update the public key, take the following steps: + +1. Download a new key from the NGINX Management Suite host: + + - Secure: + + ```shell + curl https:///packages-repository/nginx-signing.key | gpg --dearmor | sudo tee /usr/share/keyrings/nginx-signing.gpg >/dev/null + ``` + + - Insecure: + + ```shell + curl --insecure https:///packages-repository/nginx-signing.key | gpg --dearmor | sudo tee /usr/share/keyrings/nginx-signing.gpg >/dev/null + ``` + +2. Update the `nginx-agent.list` file to reference the new key: + + ```shell + printf "deb [signed-by=/usr/share/keyrings/nginx-signing.gpg] https:///packages-repository/deb/ubuntu `lsb_release -cs` agent\n" | sudo tee /etc/apt/sources.list.d/nginx-agent.list + ``` + +--- + +## Publishing to an instance or instance group returns error "outside the allowed directory list" + +### Description + +If an instance or instance group's configuration references an aux file (for example, an SSL certificate) that is not in the expected allowed directory, publishing the config will fail. The same can happen when a certificate is assigned a file path outside the allowed directory. In both cases, the system returns an error similar to the following: + +```text +Config apply failed (write): the file is outside the allowed directory list. +``` + +### Resolution + +For a failure when publishing of a configuration, move the aux file to the allowed directory and update the configuration; for example, use `/etc/nginx/` for certificates. 
+ +For a failure when publishing a certificate to an instance or instance group, ensure the assigned file paths are set to the allowed directory; for example, use `/etc/nginx`. + +--- + +## How to Get Support + +{{< include "support/how-to-get-support.md" >}} + diff --git a/content/nms/CHANGELOG.txt b/content/nms/CHANGELOG.txt new file mode 100644 index 000000000..41109aefa --- /dev/null +++ b/content/nms/CHANGELOG.txt @@ -0,0 +1,1652 @@ +# CHANGELOG + + + +## 0.211.0 +2023-02-23 + +### Features + +- Merge branch 'docs-1121-http-method' into 'staging' (f2066af6f103fe29f81057883dc27d7cf9427fcb) + +## 0.210.0 +2023-02-23 + +### Features + +- Merge branch 'fix-helm-param-names' into 'staging' (70c6a6990127cba315647efe571578cc6181f2b4) + +## 0.209.0 +2023-02-23 + +### Features + +- Merge branch 'agent-install-docker' into 'staging' (7e8c724ad972c595dcf795629ae00dc4b42871f3) + +## 0.208.0 +2023-02-23 + +### Features + +- Merge branch 'fix-warm-sync-scenario' into 'staging' (b905cc34ac9f53e685d3cad65bd8074d8beafff4) + +## 0.207.0 +2023-02-22 + +### Features + +- Merge branch 'nms-scaling' into 'staging' (fa3a6126ee4495234eeb9a7a5436209c1153433b) + +## 0.206.0 +2023-02-21 + +### Features + +- Merge branch 'DOCOPS-1520-cp' into 'main' (6db2dfdf0700d04d27586e2d34f3e35345499882) +- feat: add versions to Security Monitoring docs (7a6092a8fac9985c62c86f536e220004ee0c21af) + +## 0.205.0 +2023-02-21 + +### Features + +- Merge branch 'devportal-docker-versions' into 'staging' (a6616e5a174e797a542b5795cc14533a920a6dce) + +## 0.204.0 +2023-02-21 + +### Features + +- Merge branch 'remove-devportal-signing-key' into 'staging' (512bacc778f25b4372534acd3264f9a5ea3f76dc) + +## 0.203.0 +2023-02-20 + +### Features + +- feat: Merge branch 'devportal-docker-entrypoint' into 'main' (0212d0b23dffb3cce2d41ea4712be705a4fc74cc) +- Merge branch 'cherry-pick-510e40fc' into 'main' (a4fbd7bc5d87d40248412cea0f772b0c5bb891da) + +## 0.202.0 +2023-02-16 + +### Features + +- Merge branch 
'NDD-220-fix-image-refs-dev-portal' into 'staging' (d52ece4f02af98004669147405a4144faf45f990) + +## 0.201.0 +2023-02-16 + +### Features + +- Merge branch 'docs-1120-correlation-id' into 'staging' (cafef48bb969f9f0e6895c495d0e7c40f819789a) + +## 0.200.0 +2023-02-16 + +### Features + +- Merge branch 'docs-1122-bodysizelimit' into 'staging' (9f34bded2496279a730e2714b94eefa5be4c61ee) + +## 0.199.0 +2023-02-16 + +### Features + +- Merge branch 'add-known-issue-for-nms-40484' into 'staging' (8b53821f73353746110d701dc50c72b0d01bdbb8) + +## 0.198.0 +2023-02-15 + +### Features + +- Merge branch 'alpha-sort-acm-policies' into 'staging' (98506c18d6bcb4efc0a0fc4afbb27f6dc321bf7e) + +## 0.197.0 +2023-02-15 + +### Features + +- feat: Merge branch 'helm-docs-image-name' into 'main' (2f3fa9e820ce44b94d814cd75e5a6ccbbabf2f7f) +- Merge branch 'cherry-pick-58de4bf8' into 'main' (c3d8941f14e33ac63e9300214fe0b2cf4d9b0a63) + +## 0.196.0 +2023-02-15 + +### Features + +- feat: Merge branch 'nginx-typos' into 'staging' (5fd8907dc6af4510784051c58fb34c6c5d48688f) +- Merge branch 'cherry-pick-2d8dc9fd' into 'main' (63b144f3f5339b1c252a64dc8c65d1eaafa4f162) + +## 0.195.0 +2023-02-15 + +### Features + +- Merge branch 'proxy-request-headers-docs' into 'staging' (37c986e957438ccbd22a87714faf57ec908f75e6) + +## 0.194.0 +2023-02-14 + +### Features + +- Merge branch 'butmp-acm-policy-template' into 'staging' (843b8212a5bd9eb5cddb169dc609cb18041f5a09) + +## 0.193.0 +2023-02-14 + +### Features + +- Merge branch 'mrajagopal-docops-1603' into 'staging' (d870c0463c05716cef3c918cbddab63b7e1cd5cf) + +## 0.192.0 +2023-02-13 + +### Features + +- Merge branch 'move-log-format-param-table' into 'staging' (263af1498a3a2dbcce2e8eea52c6a77a30c211e6) + +## 0.191.0 +2023-02-13 + +### Features + +- Merge branch 'update-helm-doc' into 'staging' (ce0cbd824d5572d1aa9a21f9496c7630970d3743) + +## 0.190.0 +2023-02-13 + +### Features + +- Merge branch 'cherry-pick-afc2c783' into 'main' 
(3383025331f828fd27cb08359277001ca97fb6e8) +- feat: Merge branch 'RAM-HELM-DOC-UPDATE' into 'staging' (638e9e58910c0022c6468720dd29b87ecc81723b) + +## 0.189.0 +2023-02-13 + +### Features + +- Merge branch 'NMS-39889/log_format_policy' into 'staging' (f6eee3f794bea8a15c51879fbd08c8c8e9603dff) + +## 0.188.0 +2023-02-13 + +### Features + +- Merge branch 'NDD-212-acm-rhel9-oracle8-support' into 'staging' (21243df01c0ce54b00585b4b83981f8b77cb6b53) + +## 0.187.0 +2023-02-10 + +### Features + +- Merge branch 'cherry-pick-e69dc746' into 'main' (001cef835cca250483c2fb14570fd6304eb31732) +- feat: Merge branch 'dedgar-typos' into 'staging' (7cc884955e43598344d5d6304b3d2beb8d33c2e3) + +## 0.186.0 +2023-02-10 + +### Features + +- Merge branch 'contributing-docs-readme' into 'staging' (3a1a3ccb89e56fb6eae06b277b0a256d2cfe132b) + +## 0.185.0 +2023-02-09 + +### Features + +- Merge branch 'fetch-new-acm-policy-template' into 'staging' (c09bf4bd142fa7d4062be96bb7750c51e0a58743) + +## 0.184.0 +2023-02-08 + +### Features + +- Merge branch 'rjohnston-NMS-40253-nms-prefix-image-name' into 'staging' (8f8880e77da2166a9d0d6f0535d91681ade03739) + +## 0.183.0 +2023-02-08 + +### Features + +- Merge branch 'rjohnston-NMS-40038-fetch-ext-dep-os-list' into 'staging' (81c881c6dd059c125ca005b6754a587fadea666b) + +## 0.182.0 +2023-02-08 + +### Features + +- Merge branch 'fix-NMS-40241-acm-oidc-doc' into 'staging' (9f6a412fe49d247de224ff21694b20a361584ea3) + +## 0.181.0 +2023-02-07 + +### Features + +- feat: Feb 23 theme bump (2f922843f640e22939b1c83929b482c275f23509) +- Merge branch 'feb23-theme-bump-main' into 'main' (0ef82b3dd9b3910984372ba4fc819558ceb44a47) + +## 0.180.0 +2023-02-07 + +### Features + +- Merge branch 'revert-3c8750a3' into 'staging' (1fa309542ef5bcbb96470dd630a5a19670724341) + +## 0.179.0 +2023-02-03 + +### Features + +- Merge branch 'NDD-175-text-code-block' into 'staging' (1feb30b32d8947e46125d0e3e44dcad6e69a3f9d) + +## 0.178.0 +2023-02-03 + +### Features + +- Merge branch 
'NDD-189-fix-nim-v1-eula-links' into 'staging' (64122781f88f37b844b59bdbc2b90a10ad36fe64) + +## 0.177.0 +2023-02-03 + +### Features + +- Merge branch 'NDD-125-manage-policies-ui' into 'staging' (ca15f384bab272c5c538619ab0f899dc0beb97f9) + +## 0.176.0 +2023-02-02 + +### Features + +- Merge branch 'rjohnston-NMS-37322-k8s-support-package' into 'staging' (5df7cf9536ccff991ab1eb2d96a75bd940be29d6) + +## 0.175.0 +2023-02-02 + +### Features + +- Merge branch 'revert-49f4eff7' into 'staging' (3dead72c258e37eec6439cad57e63da4b4dfe428) + +## 0.174.0 +2023-02-02 + +### Features + +- Merge branch 'NDD-184' into 'staging' (6744292a3d00f75470833da23b270cdbf460d516) + +## 0.173.0 +2023-02-02 + +### Features + +- feat: Merge branch 'acm-release-1.4.1' into 'main' (ab63896009b07ace3a34c5ac4abb71a3b966410f) +- Merge branch 'acm-release-1.4.1-cp' into 'main' (821cf9895b6be98008cd2c44bed1fcd23bab9c5f) + +## 0.172.0 +2023-02-01 + +### Features + +- Merge branch 'docs-1119-jwt-assertion' into 'staging' (449848a4782e3e850662d26d246807a000c049b7) + +## 0.171.0 +2023-02-01 + +### Features + +- Merge branch 'docs-1118-basic-authn' into 'staging' (192057054ba07841b11c02397838860a9674ff2b) + +## 0.170.0 +2023-02-01 + +### Features + +- Merge branch 'docs-1117-api-key-authn' into 'staging' (a68372506764340551f5bfefcabc1ea8c4630fef) + +## 0.169.0 +2023-01-31 + +### Features + +- Merge branch 'edgar37-staging-patch-25112' into 'staging' (aa2d399bdd8c3c339c79b9db17b4f211ae5d4d5b) + +## 0.168.0 +2023-01-31 + +### Features + +- Merge branch 'edits-to-RN-39943' into 'staging' (a177eba300fa43d562063984fb932ed244b0c074) + +## 0.167.0 +2023-01-30 + +### Features + +- Merge branch 'add-NMS-39431-troubleshooting-guide' into 'staging' (8dc29289ac391d64280719138301839078db4771) + +## 0.166.0 +2023-01-30 + +### Features + +- Merge branch 'sm-release-1.2.0' into 'staging' (e60ec043cbdc2dc6e0221c34fe573f74f6995cb9) + +## 0.165.0 +2023-01-30 + +### Features + +- Merge branch 'nim-release-2.8.0' into 'staging' 
(f4a472dd0aa46420ce820cef3ba375d3b159256e) + +## 0.164.0 +2023-01-27 + +### Features + +- Merge branch 'fix-DOCS-805' into 'staging' (2b684185cff57ec9de8fa1a25e0250eded2fdeb9) + +## 0.163.0 +2023-01-27 + +### Features + +- Merge branch 'jp-fix-watchdocs-IDs' into 'staging' (4c3923a9eb9b04975dd5c1dea5d8b86a80d81890) + +## 0.162.0 +2023-01-27 + +### Features + +- Merge branch 'NMS-40214-fix-Docker-file-names' into 'staging' (617efb5dad2cf8ed1159f2e061e8159007ab8421) + +## 0.161.0 +2023-01-26 + +### Features + +- Merge branch 'NMS-40142-helm-create-namespace' into 'staging' (3c983ff922c975d1a2a2d0f7c181951cbc16eff9) + +## 0.160.0 +2023-01-25 + +### Features + +- add missing link to acl consumer policy and fix names of policies. (a261b16115acd88f9839054c1a4222516520ccb7) + +## 0.159.0 +2023-01-25 + +### Features + +- fix: fixes broken links blocking publishing from main (015dd862d882c254476425119da32aaaa3229c05) +- Merge branch 'fix-broken-install-guide-links' into 'main' (6843d9bc3710cca9323a07c3c41a4c64ee0fdaed) + +## 0.158.0 +2023-01-25 + +### Features + +- Merge branch 'known-issue-NMS-40142' into 'staging' (f13200c43583c83715b55abb68f036d878aba371) + +## 0.157.0 +2023-01-24 + +### Features + +- Merge branch 'NMS-40045-b' into 'staging' (af7bd22c85f984598d8dce2f0d0a12a9cda90d81) + +## 0.156.0 +2023-01-24 + +### Features + +- Merge branch 'docs-db' into 'staging' (ce08f651b008e8eda1255ab73be5a4333ab2dd95) + +## 0.155.0 +2023-01-24 + +### Features + +- Merge branch 'NMS-40045-docs' into 'staging' (d5865f71440e716d9f10a6c3fdc30f70fc7f619f) + +## 0.154.0 +2023-01-24 + +### Features + +- Merge branch 'staging' into 'staging' (17d71e926220d7771fcc536a07a6bd5a0ecd7bb9) + +## 0.153.0 +2023-01-23 + +### Features + +- Merge branch 'acm-upgrade-paths' into 'staging' (3352951da2408a39e43a0ce8f4247ab23684b321) + +## 0.152.0 +2023-01-23 + +### Features + +- Merge branch 'acm-release-1.4.0' into 'staging' (74be80d14913fe1eb1a2474c4b38b00fc98242fc) + +## 0.151.0 +2023-01-23 + +### 
Features + +- Merge branch 'cherry-pick-446c714b' into 'staging' (864d98883c227985bc4fcfb7579a1561a2453986) + +## 0.150.0 +2023-01-23 + +### Features + +- Merge branch 'nina-tags' into 'staging' (5cfc9fa829c00255ea5302cff7f3e7dc8da997f6) + +## 0.149.0 +2023-01-20 + +### Features + +- Merge branch 'mrajagopal-docops-1570' into 'staging' (0ea2f4ec3fcc4cf65e3ebb1b6ff452a91eb27c1e) + +## 0.148.0 +2023-01-19 + +### Features + +- Merge branch 'fix-helm-links' into 'staging' (be80b507dc757ad47285535684f1eceb9e637b32) + +## 0.147.0 +2023-01-18 + +### Features + +- Merge branch 'NMS-39814/compilation-error-info' into 'staging' (927ff054e8c1a702e078c2c69138023ff17c91f8) + +## 0.146.0 +2023-01-18 + +### Features + +- Merge branch 'acm-known-issue-39943' into 'staging' (e970db31aca3d1cfa2977e67bf7dca723e2b1803) + +## 0.145.0 +2023-01-18 + +### Features + +- Merge branch 'acm-helm-docs' into 'staging' (0cea042fef03e27745d726d58b48be080e0ffd48) + +## 0.144.0 +2023-01-17 + +### Features + +- Merge branch 'NDD-115-acm-nginx-plus-matrix' into 'staging' (2aef9b2cc14179036d73b89bb46fcdad5f47dff5) + +## 0.143.0 +2023-01-17 + +### Features + +- Merge branch 'NMS-39785' into 'staging' (569602469371b97d143503baac1253d7fcb8e7be) + +## 0.142.0 +2023-01-17 + +### Features + +- Merge branch 'NMS-39786' into 'staging' (8b9139e5b962a8f0b2b6f530f759bb2c6e796f56) + +## 0.141.0 +2023-01-10 + +### Features + +- Merge branch 'docops-1565-nim-admin-password' into 'staging' (c4d93d37ba76a364b63c7029d0903847b3f87fe1) + +## 0.140.0 +2023-01-10 + +### Features + +- Merge branch 'DOCOPS-1562' into 'staging' (34634a7b812cc792cc039afe0091e64d6d989a00) + +## 0.139.0 +2023-01-09 + +### Features + +- Merge branch 'nim-app-sec-docs' into 'staging' (fda3d5529f1f69db2025fa8c6a3f036f6b18c3f6) + +## 0.138.0 +2023-01-06 + +### Features + +- Merge branch 'fix-clickhouse-alias' into 'staging' (a7cebae971e4d6dbbb10829e978db6d2712e549a) + +## 0.137.0 +2023-01-06 + +### Features + +- Merge branch 'add-clickhouse-alias' 
into 'staging' (2d3307063eebe0d0565b21f808c1ff97db77ead4) + +## 0.136.0 +2023-01-05 + +### Features + +- Merge branch 'NMS-39426' into 'staging' (35f30033968b5461bea86c6a81077975854dd291) + +## 0.135.0 +2023-01-04 + +### Features + +- Merge branch 'docops-1558-tech-spec-nginx-support' into 'staging' (c2be5a645dad5b4bc1304fb6c1b8ec28c8caf48a) + +## 0.134.0 +2023-01-04 + +### Features + +- Merge branch 'clean-up-nim-kis' into 'staging' (d744db0b4e2d89cdc7acd2414bb6d383266883c4) + +## 0.133.0 +2022-12-21 + +### Features + +- Merge branch 'edit-to-nms-38876-rn' into 'staging' (ee8db3d23c4dd8516f9b1d5c7af86031d55d4dae) + +## 0.132.0 +2022-12-20 + +### Features + +- Merge branch 'nim-release-2.7.0' into 'staging' (df79c403e5e0a348982b579d0039db8973362a13) + +## 0.131.0 +2022-12-16 + +### Features + +- Merge branch 'acm-release-1.3.1' into 'staging' (ae9329221e969e2fc64b7f1a451ba1f172784273) + +## 0.130.0 +2022-12-15 + +### Features + +- Merge branch 'jputrino-patch-DOCS-1099' into 'staging' (4eacb6fe4ffec102bb74994a5ca7ca7c1516a83a) + +## 0.129.0 +2022-12-15 + +### Features + +- Merge branch 'DOCOPS-1544-rename-nms-repo' into 'staging' (5501756fedefdaff7f0247134df0d568be843ab2) + +## 0.128.0 +2022-12-13 + +### Features + +- Merge branch 'DOCOPS-1538-default-api-proxy-policy' into 'staging' (c0d097413ac85dbb1a316ed851fb2ba24a1cbf0e) + +## 0.127.0 +2022-12-13 + +### Features + +- Merge branch 'NMS-37244-staging' into 'staging' (6adb0a305d31c0ee5a779b6a6de49842e6f8520f) + +## 0.126.0 +2022-12-12 + +### Features + +- Merge branch 'acm-release-1.3.0' into 'staging' (f79bf1358cd0c0b4408dee01f792ba0e35d0e232) + +## 0.125.0 +2022-12-02 + +### Features + +- Merge branch 'NMS-39204-dupe-upgrade-command' into 'staging' (03aa8af7cd0da9e9661a6f49f2205f93f965a9a7) + +## 0.124.0 +2022-11-30 + +### Features + +- Merge branch 'add-containerized-NIM-toubleshooting-guide' into 'staging' (1dbf0045901a4449038f262383dc0269d3dd90b1) + +## 0.123.0 +2022-11-29 + +### Features + +- Merge branch 
'NDD-65-NIM-2.6-RN-edits' into 'staging' (7d9b2dc4d65fdbd8e186c14f4abfd08936365588) + +## 0.122.0 +2022-11-29 + +### Features + +- Merge branch 'watchdocs-audit-cp' into 'staging' (4b700f3f58541b9d4ad2517d6c2e2936364b14eb) + +## 0.121.0 +2022-11-28 + +### Features + +- Merge branch 'NMS-38380-install-n-plus-metrics' into 'staging' (b4df885bdf8c14e612fa1161fd2f5ee8a55f9d89) + +## 0.120.0 +2022-11-28 + +### Features + +- Merge branch 'NMS-38589-fix-helm-upgrade-strategy' into 'staging' (c10db00eaeaf5395090490e96a41b56f459f00e8) + +## 0.119.0 +2022-11-23 + +### Features + +- Merge branch 'nim-2.6.0-rn-updates' into 'staging' (3f21f065384791a98d16d983afc6899f39a541e7) + +## 0.118.0 +2022-11-18 + +### Features + +- Merge branch 'update-nimvers' into 'staging' (52971771f16b93bbdcb1ae96ecd87758f36ae0f8) + +## 0.117.0 +2022-11-18 + +### Features + +- Merge branch 'nim-release-2.6.0' into 'staging' (5e0139d7b5559232063c6f7d94d10daaa23740ef) + +## 0.116.0 +2022-11-15 + +### Features + +- Merge branch 'NMS-38586-helm-upgrade' into 'staging' (1741140128ddc4a49f232dffa0849ada68174d39) + +## 0.115.0 +2022-11-03 + +### Features + +- Merge branch 'docops-1478-explain-nms-platform' into 'staging' (bc1f376cc77b16191226027c5099b8ece8418941) + +## 0.114.0 +2022-11-03 + +### Features + +- Merge branch 'allowed_directories_staging' into 'staging' (cfbffcb89dc37dd98d94643feca2f690abe253c9) + +## 0.113.0 +2022-10-27 + +### Features + +- Merge branch 'add-keycloak-acm-1.2-rns' into 'staging' (077ccada5bdd74c7ebb51bfe59d80edc300e3570) + +## 0.112.0 +2022-10-27 + +### Features + +- Merge branch 'acm-nms-37420-introspection-tutorial' into 'staging' (328a3f4116cbf4153e5861508a8771fef9ce61e0) + +## 0.111.0 +2022-10-25 + +### Features + +- Merge branch 'rprabhu-edits-2' into 'staging' (f363eaa8a6d4048c70218e88761364a68d1f2cde) + +## 0.110.0 +2022-10-20 + +### Features + +- Merge branch 'fix-minor-issues-in-acm-docs' into 'staging' (1edea52ffcddfc6b11abaad226d1191d141da532) + +## 0.109.0 
+2022-10-19 + +### Features + +- Merge branch 'acm-1.2.0-grpc-preview-not-alpha' into 'staging' (a3f5b72e0c0e0552a3832aa3fda0242d93520c64) + +## 0.108.0 +2022-10-19 + +### Features + +- Merge branch 'acm-grpc-proxy-edits' into 'staging' (0e0ce55c2773e96086d56c1700435895afebdeba) + +## 0.107.0 +2022-10-18 + +### Features + +- Merge branch 'fix-typo-grpc-method' into 'staging' (e0cd3e54c1f5e3f3e680d961cba286e7b9c252e5) + +## 0.106.0 +2022-10-18 + +### Features + +- Merge branch '1.2.0-release-notes-fix' into 'staging' (7249e0cbb6f69bfbe6d9c4e78c9858dd33d00d90) + +## 0.105.0 +2022-10-18 + +### Features + +- feat: publish ACM 1.2.0 docs - Cherrypick (1a97b08e6f006124ac7726721d91fce21c933121) +- Merge branch 'cherry-pick-3baee6f8' into 'main' (418da36e38c4978980189fabf65a91edce1093f8) + +## 0.104.0 +2022-10-18 + +### Features + +- Merge branch 'DOCOPS-1482-acm-troubleshooting' into 'staging' (b8d7f729570fd2597f121d3c36e1fc358b847c13) + +## 0.103.0 +2022-10-17 + +### Features + +- Merge branch 'fix-broken-example-json-link' into 'staging' (732de4c98db7cd2389688fc6ce96f22f50d21648) + +## 0.102.0 +2022-10-14 + +### Features + +- Merge branch 'tidy-up-include-files' into 'staging' (0218b3afeb1243db7c58edbfa9e10b1509222cb3) + +## 0.101.0 +2022-10-13 + +### Features + +- Merge branch 'count-instance-supported-distro-table' into 'staging' (eeb6e562993e1aa8399726d28e4c3ab511c469f0) + +## 0.100.0 +2022-10-13 + +### Features + +- Merge branch 'rprabhu-edits-2' into 'staging' (cf45136075b41009b08ab6252707e853a679ae53) + +## 0.99.0 +2022-10-12 + +### Features + +- Merge branch 'cherry-pick-063eca29' into 'main' (d8871aa10748ea70b4af08ce7b26324655cf56a3) +- feat: Merge branch 'nms-37940-uninstall-devportal' into 'main' (628cecf7bfd34e47e2c4f13591dfcbe0b185a65e) + +## 0.98.0 +2022-10-11 + +### Features + +- Merge branch 'DOCOPS-1480-fix-broken-links' into 'staging' (448b1d916df6137fe2df2c31b8691c5e3e3bb146) + +## 0.97.0 +2022-10-11 + +### Features + +- Merge branch 'nim-2.5.1-rns' 
into 'staging' (641a75f873b5e38fd1ea2f5786615d254ae5a8a5) + +## 0.96.0 +2022-10-07 + +### Features + +- Merge branch 'NMS-37749' into 'staging' (5d8e03219b6e485720bfa4eb069196106b2ec182) + +## 0.95.0 +2022-10-06 + +### Features + +- Merge branch 'combine-cert-key-copy-rename-commands' into 'staging' (74a2b678ae005ce2d7222e78c45dc3d2d72bddf8) + +## 0.94.0 +2022-10-06 + +### Features + +- Merge branch 'move-install-first-in-nav' into 'staging' (e0905e7ce1275fffe8ee16e2116473c5205197f8) + +## 0.93.0 +2022-10-06 + +### Features + +- docs: cherry-cick click house version bump to main (3e6e2f484d6e09b05d9139bb693670540660a782) +- Merge branch 'cp-clickhouse-version-bump' into 'main' (c7559728d876acd7d139648edd88a776b317f293) + +## 0.92.0 +2022-10-06 + +### Features + +- Merge branch 'cp-tabbed-install-guide' into 'main' (abeed70988247c90324e5f4d72d147c6891908ef) +- feat: cherry-pick tabbed install guide into main (aac40482020cf3dfd4a736f0a8474e2ada8f798d) + +## 0.91.0 +2022-10-04 + +### Features + +- fix: cherry-pick to fix typo in NIM 2.5 RNs (543355d0909a11ecd3c28dad373a440b68fadf02) +- Merge branch 'cherry-pick-2971834d' into 'main' (35ec817c38abf98d10728fbd5e78b90e34025f67) + +## 0.90.0 +2022-10-04 + +### Features + +- feat: From NMS-37230-configure helm chart to use nginx-plus as apigw (ad82a9f793d4d559333dec136e73edd14921e2e3) +- Merge branch 'NMS-37230-oidc-nms-helm' into 'main' (9171dcf7703ccadaeed584f2156e7fafc4a947d0) +- feat: F5-hugo theme September bump (5dd7955c93ef229e682dc08266f8ff100bcbaf64) +- Merge branch 'f5-hugo-september-bump' into 'main' (4da16ff02d2c5e2b8fbfb234ffd66c7a63db2a07) +- docs: NMS-37155 "Add how to enable create credentials" (dd95f560ef5946eb81734b20d329c495a4bf1bd0) +- Merge branch 'NMS-37155-Add-how-to-create-credentials' into 'main' (8dbeeabb3b56f81438b0c21def6019fe6762ef5f) +- Revert "feat: Merge branch 'NMS-37230-oidc-nms-helm' into 'main'" (c66098ad61f2d13a7f55715bccab492f1a334ea9) +- Merge branch 'revert-9171dcf7' into 'main' 
(477efb2912f866893ec035452ed25dbe2c09b8b3) +- fix: created includes for tech specs (6f092848c9f21534481fcc4d7db7fbad186ceb16) +- Merge branch 'DOCOPS-1427-break-up-tech-specs' into 'main' (7543e68aa5b9fd9490dde60f5b3f277411ef52c1) +- fix: added back steps for enabling/starting nms- services on RHEL (4a7d6bf99578e45d1359f9c7751a005992af7489) +- Merge branch 'docops-1428-start-nms-services' into 'main' (e145167c79bebf608ae59e855c7ca4c5cdb6454f) +- Merge branch 'fix-broken-link-rbac-doc' into 'main' (e801125bad7a3375d6848985ea72609625f6b963) +- fix: broken link in RBAC doc (0de90c4cd802159394d5af542d9eba642b4efbf9) +- Merge branch 'NMS-37346' into 'main' (8d25be8e0a4cbf035a408e0c720cae4c80c1e88d) +- fix: update information about export functionality in inventory (6ff6f042eb063acb9a68a521480e25a462703feb) +- Revert "feat: Merge branch 'NMS-37346' into 'main'" (0924689ab96aa83c688de17ab70f100a45882040) +- Merge branch 'revert-8d25be8e' into 'main' (a2c0f66e7f6c2e1872cc3ee564714f22c59abaff) +- docs: refactored preparing NMS platform docs (7083744af7c69f8959fcf379afc147b1d4a5382d) +- Merge branch 'DOCOPS-1430-refactor-ACM-getting-started' into 'main' (cfe0667e0a8f62e816c3b6d1193d44101086a045) +- feat: cherry-pick 'nms-36444-update-acm-definitions' into 'main' (8935484639854f7c9d7789303e2f4946c5cd6a13) +- Merge branch 'cherry-pick-e7d5db6c' into 'main' (ca4666fb8349fac014efdd9aa01a67381b02299c) +- chore: add codeowners file (3b2a75a2e446d0dba42897740bea891af88fcc90) +- Merge branch 'j-putrino-patch-codeowners' into 'main' (cbf3e5f61f2ac532394d26c165f2cb8026492d6d) +- fix: cherry-pick clickhouse version bump in fetch-external-dependencies script (f1f428a9a6ca1c6b46689e92710b203e2585be35) +- Merge branch 'cherry-pick-76685a6e' into 'main' (70661a039df96cdf20a37d81bdaf345188b40ab3) +- Merge branch 'cherry-pick-77f6556d' into 'main' (276c0b84b88b8628d3e6907b46701c3f094cc148) +- feat: CP NIM upgrade dependencies for ACM (3b7f34403c13a78b0e0928223af83650f7a0c181) +- feat: Merge 
NIM 2.5 docs into main (9101d694977650f7df6c4a35df1a5cab818fb581) +- Merge branch 'cherry-pick-da3937bf' into 'main' (15178883ddcf5e0373158e1151542af08225215d) + +## 0.89.1 +2022-09-07 + +### Fixes + +- DOCOPS-1402 + (88cada52bde3e0e583ecf1e80ba749311f014c2c) + +## 0.89.0 +2022-09-05 + +### Features + +- fix: add versions string to ACM docs (bff5c9888cb3df53ed152468dde2c6a97458cf8a) +- Merge branch 'DOCOPS-1293' into 'main' (383d7be07c1e7a1ca192adc8b5f16aea0560bbec) + +## 0.88.0 +2022-09-01 + +### Features + +- Clickhouse edits (535a9c878e472979da86392c0129de683b5f6d3e) +- Merge branch 'clickhouse-edits' into 'main' (aeeb27b3648ed872c1db58192e413752d69bfd9d) + +## 0.87.0 +2022-09-01 + +### Features + +- fix: edits to CH doc and added redirect from broken CLI link (5ca5113688053c8be75f4b1d02230ef4073553fa) +- Merge branch 'clickhouse-edits' into 'main' (1d8b974ecb79e826e7f3a929bbf7dcf02c1bc75d) + +## 0.86.0 +2022-09-01 + +### Features + +- fix: broken CH links (5517fba04c5529510f4f1da4bac40068f4fcc64a) +- Merge branch 'clickhouse-edits' into 'main' (2668142aab6d5ede583f650c4d4f8761d30559fb) + +## 0.85.0 +2022-09-01 + +### Features + +- fix: broken link to CH doc (4426489c32f12f102b7d9f7e6ba240006226791d) +- Merge branch 'clickhouse-edits' into 'main' (26013c18634a851b1eecb135ab8189462fe994f9) + +## 0.84.0 +2022-09-01 + +### Features + +- Merge branch 'clickhouse-installation-edits' into 'main' (cbe99730fc1b51b3ce28e2f240b0d7c7db964044) +- fix: added new topic on how to configure clickhouse (3961a8d5762b76811d498f918486c712edd22c96) + +## 0.83.0 +2022-09-01 + +### Features + +- fix: cleaned up the agent installation guide (508b24a9746115856fb51fc3e4d43795ac3e5e6b) +- Merge branch 'agent-install-edits' into 'main' (40f16c51ead01d1a3d2f1365a4c9daf01223aaec) + +## 0.82.0 +2022-09-01 + +### Features + +- fix: update Agent config, environment variables and CLI flags (e9aa5fef4da1d7f839aa7b2f2d1302719bbd9fe2) +- Merge branch 'nms-34779' into 'main' 
(549622552f12ae3683db9e01960dcbd500f64f84) + +## 0.81.0 +2022-09-01 + +### Features + +- Merge branch 'DOCOPS-1394b' into 'main' (1bd489944256d5c239cd43b1613d7b49dae1bdea) +- fix: exclude taxonomies from sitemap (c6419059a832a31ad413bcc030fd6f4c2c85d4ca) + +## 0.80.0 +2022-09-01 + +### Features + +- NMS-36992 devportal upgrade steps (bd963b2a646b0ca32a2cbc44a0d722b1bcc63660) +- Merge branch 'nms-36992-devp-upgrade' into 'main' (0ccd74c8124453a45c6258116944b6f8970c1a50) + +## 0.79.0 +2022-08-31 + +### Features + +- Tech spec edits (e85e4eff1efd030783ab1fe9b290b82443f628a0) +- Merge branch 'tech-spec-edits' into 'main' (652bbacf78e3941a622ae8617b4a7a67910621a3) + +## 0.78.0 +2022-08-31 + +### Features + +- Merge branch 'docops-1388-dev-portal-separate-host' into 'main' (5cb6379fa8f03455886ecce284d61337de6cb40f) +- fix: creatd shortcode for devportal dedicated host note (e19dbf392d6c75783a4ea8c58201c37004a77abc) + +## 0.77.0 +2022-08-31 + +### Features + +- Tech spec table hr (e253d7a2be6e3a56ba24545475cb5e3828389dfb) +- Merge branch 'tech-spec-table-hr' into 'main' (c87dbd06c9abf7ae25b9f73cf2bd3c86d33be231) + +## 0.76.0 +2022-08-31 + +### Features + +- fix: clean up categories (9d6e113a5e5065131402fb91acc4a17dcb9c9806) +- Merge branch 'DOCOPS-1394' into 'main' (9e02084f2ce38a11289ebafa01f503e4d72e746c) + +## 0.75.0 +2022-08-31 + +### Features + +- fix: correct tech specs for ACM (eb9a33ca4a49329ee47f3d3eed201bfa69afb796) +- Merge branch 'tech-specs' into 'main' (b999e288bdde9b6231902ca5223b3b87b2a2c0e1) + +## 0.74.0 +2022-08-31 + +### Features + +- Merge branch 'acm-release-1.1.1' into 'main' (5c94016f6ae38552e404b0c105fda2c074c4f928) +- ACM 1.1.1 docs release branch (1c71b73aae28faeb86f9dd61f564923faa661a66) + +## 0.73.0 +2022-08-30 + +### Features + +- Merge branch 'npi-ssm-aws' into 'main' (381a42d9dee357881ff6e61002ac5bd7aaa8de7b) +- docs: Adding Session Manager details if exposing SSH isn't allowed. 
(0602d5bfc0f11e41ef9a521c0ad1581dbf015478) + +## 0.72.0 +2022-08-30 + +### Features + +- Merge branch 'update-link-sso-devportal' into 'main' (a1554036893e7ff8e48822400003f7ac4317fda1) +- fix: renamed setup-oidc-devportal file (dd7b2a10e6523e20f78b06e1192c08e71dc4734d) + +## 0.71.0 +2022-08-30 + +### Features + +- Merge branch 'add-users-redirect' into 'main' (b9d4709583d19ff30190aded166acd41743aa32d) +- fix: fixed redirect for Add Users URL (b536f03ec624bc6c411fcd52eab18f4f432d6d6d) + +## 0.70.0 +2022-08-30 + +### Features + +- fix: clarified how to configure basic auth for new users (ad87f98d814043ffbd4524fa0728d6ab5a866dff) +- Merge branch 'docops-1369-add-user-basic-auth' into 'main' (c3fadbbefc290e95714821dabcfc8484cc0dcc0e) + +## 0.69.0 +2022-08-30 + +### Features + +- Merge branch 'acm-36957-update-dns-limitation' into 'main' (d1d9c57b5739947d3db46b01524f04e41b35c5a8) +- Acm 36957 update dns limitation (6a2f1bc727d8f9f21d38cd05f8c53709de6aa831) + +## 0.68.0 +2022-08-30 + +### Features + +- fix: update Agent CLI flags (0bbe6893e98226a6a3739c4c063ff68f4d80e770) +- Merge branch 'DOCOPS-1323' into 'main' (d3cedcdd6095503d6807bfb5d9c1375f8b940ece) + +## 0.67.0 +2022-08-30 + +### Features + +- Merge branch 'fix-ki-bug-icons' into 'main' (48713e64cfb9bae890db0ae8e26c303cf04fa039) +- Fix ki bug icons (875893ca33b2da3473ae1a7d41e2a35e4f8ddf83) + +## 0.66.0 +2022-08-30 + +### Features + +- Merge branch 'add-acm-known-issues' into 'main' (49949b2d6b7087561018ba16767668805a36df47) +- fix: adds ACM known issues retroactively (3ec689ebfe70b2d3af1c957b5ec9d3c3162e4331) + +## 0.65.0 +2022-08-29 + +### Features + +- feat: Add additional ACM pages to watchdocs (d9c9d0519a5d09d13436272f500f05d3fde01429) +- Merge branch 'watchdocs-acm-addendum' into 'main' (93e1d1dc201cb8a6ac491c29f0b945df65f7937e) + +## 0.64.0 +2022-08-29 + +### Features + +- docs: added nap reporting section (ccccf167f953bb9f9485de49de7dfd0745f5d34a) +- Merge branch 'NMS-36245' into 'main' 
(f088a4ca108f57511e2207a0e16cd9364e9a2e3d) + +## 0.63.0 +2022-08-26 + +### Features + +- fix: added RN for NMS-32145 (199c83dba9afd9f966536c8c5cff39cb1c0dfd4e) +- Merge branch 'NMS-32145-add-RN-NIM-2.3' into 'main' (4ac2e3e71a3f1f76cd5d31644d7581c55380bbda) + +## 0.62.0 +2022-08-26 + +### Features + +- Merge branch 'NMS-36933-clickhouse-link' into 'main' (3cea32bd32076e68495324925d3ff133ae7dc3de) +- fix: fixed ClickHouse link (16654087516d0bceea5a1abafd0f8e361f765522) + +## 0.61.0 +2022-08-26 + +### Features + +- feat: DOCOPS-1278 Add untagged NIM pages to watchdocs (232d54047872881408c9c20f52685f6f14b66039) +- Merge branch 'watchdocs-addendum' into 'main' (f2e80be2790b300c8e2c5d7a04b9cc6c6b2dabee) + +## 0.60.0 +2022-08-26 + +### Features + +- feat: DOCOPS-1278 Add remaining live ACM content to watchdocs (6cf3ba16141bfb63a8d54505bfc1951d3af74bff) +- Merge branch 'acm-watchdoc-amendments' into 'main' (737af7049c3029faedee3a044e34379274a9dc0e) + +## 0.59.0 +2022-08-25 + +### Features + +- feat: DOCOPS-1278 Add docswatch metadata to ACM files (0c27922831ca3c7dd72c7b287c0a5148353d0f12) +- Merge branch 'nms-acm-docswatch' into 'main' (5c0d665e8281e36b1b4309aa80824d9399e875a5) + +## 0.58.0 +2022-08-24 + +### Features + +- Merge branch 'DOCOPS-1381-using-metrics-api' into 'main' (8ffd63398994b3be0d7dcc73c2b1cda55da17158) +- fix: removed comma from metrics query (91999a753b25767d5d5294c80992b714afe3f860) + +## 0.57.0 +2022-08-24 + +### Features + +- Merge branch 'add_nms_watchdocs' into 'main' (6db21c0006c0e0c1d315f2d78693b2b82a8a658a) +- DOCOPS-1278 Add docswatch catalogue codes to admin-guides section (6f141f8de1a575c05934a48fede82d59aaace214) + +## 0.56.0 +2022-08-23 + +### Features + +- fix: updated location for placing certs (9abffa2bcf495b2390b20e9e8aa4ec692fc80cff) +- Merge branch 'docops-1368-secure-traffic' into 'main' (b3afa35192050355f1d504b91af9cf6c613481ec) + +## 0.55.0 +2022-08-23 + +### Features + +- Merge branch 'DOCOPS-1056' into 'main' 
(c256ce644e4ba5c9d5ce69d5ff0c8c0fb7749a85) +- feat: Add new NMS Overview document (b413a682b557880540430ec7893c20569485974a) + +## 0.54.0 +2022-08-23 + +### Features + +- fix: Adds RN and doc update for nms-integrations (f7457bc435ea440e4d0a0c619dafb1b84df5db79) +- Merge branch 'NMS-36731-nms-integrations' into 'main' (25acd7aea11c90961b21d80e9ec9258963d984c7) + +## 0.53.0 +2022-08-22 + +### Features + +- Merge branch 'install-password' into 'main' (607425af98de8b9807ed4036d80780a661d2a093) +- Add password note to NMS installation (ab19851481f956b514052e299a612fdec3b3b93f) + +## 0.52.0 +2022-08-22 + +### Features + +- Merge branch 'aws-edits' into 'main' (c028f2777876e0da36ec5cd18c929dad2a26ab92) +- fix: Removing example names in AWS, provided no value. Fixed tabel reference. (9b9c617083954d6752a84e8cfd19a13f898beb84) + +## 0.51.0 +2022-08-22 + +### Features + +- fix: delete unused doc (3bc29c6aa1dbeca53799acaaa0d4077fb3cdb413) +- Merge branch 'DOCS-896' into 'main' (d7018a7904d9ba4d2ae26a85508b39c734a59ab5) + +## 0.50.0 +2022-08-19 + +### Features + +- Merge branch 'npi-aws-edits' into 'main' (0ad6efccf71d94c236dea73894375361efdc929f) +- Npi aws edits (12c62249e01eebf5443bf462435d95eef1d2c40d) + +## 0.49.0 +2022-08-19 + +### Features + +- Merge branch 'enable-tutorial' into 'main' (0b201154f647b7f020977818e5050f996a2aec09) +- fix: Enable Tutorial Section and Word Fixes to adhere to F5 Style Guide (f5998dd2f0408c1f93fc1966d8b485cf1fcc394a) + +## 0.48.0 +2022-08-19 + +### Features + +- fix: reorged the NMS upgrade guide (a5d5546a4f361bd72d1e0e980a9bd623b97c75d8) +- Merge branch 'docops-1340-upgrade-reorg' into 'main' (103a144f8dae43683713741d28ce6a581b710af9) + +## 0.47.0 +2022-08-19 + +### Features + +- docs: Update content/tech-specs.md (96c1a37447e43760c904d7f0c33b2141d40f16f7) +- Merge branch 'docops-1365-acm-plat-compatibility' into 'main' (4b5466d7c14928805833ff508c6fbdbd9549b70d) + +## 0.46.0 +2022-08-18 + +### Features + +- Upgrade instructions acm release 
1.x (54c57a0ffbb769a03f9c7fe7a7201ba1cdfd8cff) +- Merge branch 'upgrade-instructions-acm-release-1.x' into 'main' (15801081f06d13353a29855991c37a31744c593a) + +## 0.45.0 +2022-08-18 + +### Features + +- Merge branch 'npi-update-arch-diagram' into 'main' (f5c1c4ccf2f9594a820ea8183a1abbf04af3ada7) + +### Fixes + +- Updating architecture diagram updating Data Plane Host and API Gateway Proxy (333c6052c13ea0ec285a2376b438cbd9e269d298) +- Updating name of the controller host to also include ACM Module for clarity. (7f60b7a97a2ac2bdaa56c6f2a2695eef26dbec2e) + +## 0.44.0 +2022-08-18 + +### Features + +- Merge branch 'docops-1364' into 'main' (97835fa2af911fad2cfebff4e1c03b76d0e20b97) +- fix: DOCOPS-1364 remove tagging job from gitlab-ci (e76873e198387127c2507c1f97961f1980e1911b) + +## 0.43.0 +2022-08-18 + +### Features + +- Merge branch 'remove-draft-true-aws-deploy-doc' into 'main' (07824d659fb79b09c609a1a812f95d5d01f1567d) + +### Fixes + +- removed draft status (d97bff51f0d44e69932edb39986915ccd8abd6fb) + +## 0.42.0 +2022-08-18 + +### Features + +- Merge branch 'acm-release-1.1.0' into 'main' (8c45a5aa4bf5bb1e585496602bf9bd6d4976f78a) +- feat: ACM release 1.1.0 (61e23401223425be474033f502f0f7dd281d874e) + +## 0.41.0 +2022-08-18 + +### Features + +- Merge branch 'DOCS-909' into 'main' (5f1c397b6b15adb8038ea808b72d3453df02a8e5) +- fix: update DOCS-909 catalog entry code (6cdfa0e63a4f262381058e558ca760847890cb29) + +## 0.40.0 +2022-08-18 + +### Features + +- fix: remove dummy techspec pages for old redirect (51801bbe2e3c64690df572c86c13360f5c57ac2c) +- Merge branch 'DOCOPS-1362' into 'main' (95e85ca9752e2dd51da9b9a37d70173bbb1ac5fc) + +## 0.39.0 +2022-08-17 + +### Features + +- fix: update footer for NMS and revert NMS-36400 (efd0cc0780105fa7b1c8d6345683a13d323cbaca) +- Merge branch 'DOCOPS-1358' into 'main' (e07b4d3a6d8ba701e002d31c772cc14b29c9d05f) + +## 0.38.0 +2022-08-17 + +### Features + +- fix: resolves DOCOPS-1355, DOCOPS-1356 
(abccc97d2c49491087f6b5b76e5395704e87e6d7) +- Merge branch 'uninstall-guide-updates' into 'main' (083a2265c872b028117623aabe52983f4b551e49) + +## 0.37.0 +2022-08-16 + +### Features + +- Merge branch 'nms-36599' into 'main' (414f3c2162e6adf883f6d1c2980b5447e2169efb) +- NMS-36599 - update agent onboarding instructions for ACM (13d498f633be8dd0ea42395ed5aba428f79deef9) + +## 0.36.0 +2022-08-16 + +### Features + +- docs: NIM 2.4 RNs and doc updates (aca22a0b42290f47682dab7372f2b9265c3c2a72) +- Merge branch 'nim-release-2.4' into 'main' (a90cac0f3cf369a19fa0d51b53b698633b270d3f) + +## 0.35.0 +2022-08-15 + +### Features + +- Merge branch 'min-nim-version-for-acm' into 'main' (9bb846422c4406570459df82bbe83b78e9a57239) +- fix: add NIM dependency table to ACM tech spec guide (a4ef93bd496a753e63d2a1967806d947805492b2) + +## 0.34.0 +2022-08-15 + +### Features + +- fix: DOCOPS-1350 Update resulting file path name (1cfcaf1d2ef7512e10163a821188a69987b7f34b) +- Merge branch 'DOCOPS-1350' into 'main' (0bd072df949626792b5ef3e8971fc2b685f7b226) + +## 0.33.0 +2022-08-12 + +### Features + +- Merge branch 'tsg-updates' into 'main' (a05272a718faa05ff2534e284e2ea3b5c215e302) +- fix: updated troublehsooting section for NIM (39ef10e2231bb539871a140ebede00f3a9043e67) + +## 0.32.0 +2022-08-12 + +### Features + +- fix: adds Known Issue 35339 to ACM 1.0 RNs (df6993cd046fceb894c090099eba63d18180d412) +- Merge branch 'add-acm-1.0-known-issue-35339' into 'main' (3cf3c5e5b5192fd7a15c956f455a30eb89d4129a) + +## 0.31.0 +2022-08-09 + +### Features + +- feat: how-to developer portals (01844f3eaf1bcfb1dd4d4dc30c93beeb1bf536cf) +- Merge branch 'NMS-34244' into 'main' (cf9b16fe827cae28b6bedcb9d2a225fe6c9116c9) + +## 0.30.0 +2022-08-09 + +### Features + +- fix: Add dummy sections for NIM/ACM redirects - Hide them from the sidebar (8264ddc255853cdd2c30bc4dbf39b4e2ace45804) +- Merge branch 'NMS-36400' into 'main' (20f716c6eb22dc8938c9eba27bf9b5805a31831e) + +## 0.29.0 +2022-08-08 + +### Features + +- Revert 
"feat: Merge branch 'NMS-36400' into 'main'" (a1c031d48f22f9f601d04766aef66904d30ec5b2) +- Merge branch 'revert-361f81fd' into 'main' (86f317e3cf67ee1c0d179ff03a7f66bc39b8dee7) + +## 0.28.0 +2022-08-08 + +### Features + +- Merge branch 'NMS-36400' into 'main' (361f81fde3e631e7460ecc925776bc85aa97dc0e) +- fix: add redirect templates and dummy NIM sections (a1952a87dc09c833210d5b7bd76577217497d07b) + +## 0.27.0 +2022-08-06 + +### Features + +- fix: adds offline install steps for ACM and Dev Portal (74776a9643132a92fd9271b2b2e0637020ccf0b3) +- Merge branch 'docops-1273-acm-offline-install-guide' into 'main' (806b3dd32b4b4533de924402fafe78853dce9818) + +## 0.26.0 +2022-08-05 + +### Features + +- fix: adds CVE details for unembargoed NMS-34814 (3b1771e7364b9a9d11bd2f9e4156ed3ebb34070d) +- Merge branch 'NMS-34814-update-rn' into 'main' (606262a5dfd1585c6474182d940e0670b168f07f) +- Merge branch 'fix-link-oidc-devportal' into 'main' (b09c8aa5c0158d5dc5e63343768690becbb38a69) + +### Fixes + +- updated link to another repo (2f45faa3db574bb9461b222154024949e0eb3475) + +## 0.25.0 +2022-08-04 + +### Features + +- fix: NMS-32522-Setup-OIDC-Dev-Portals (4727426e72f8cac8ef349c6d466b6e0f9a236397) +- Merge branch 'NMS-32522-Setup-OIDC-Dev-Portals' into 'main' (42142743c440ec72ba7d1c92142c66eece3e68d9) + +## 0.24.0 +2022-08-03 + +### Features + +- docs: Add NGINX Signing Key on NMS Install Guide (6f6b6983902da2e0c58789876ed93a5a15755fb7) +- Merge branch 'add-deb-signing-keys' into 'main' (35a393be03b118fa635ad4862a6053e7b3d17cf6) + +## 0.23.0 +2022-08-02 + +### Features + +- Merge branch 'NMS-33344-Policies-Overview' into 'main' (46625e9af5a5c4fe2a27cc61d3646815ab6c0181) +- Resolve NMS-33344 "Policies overview" (d9d8fe7e46326e03d58dee9f9563ca89a183c4b1) + +## 0.22.0 +2022-08-02 + +### Features + +- Merge branch 'DOCOPS-1284-nim-manage-instance-groups' into 'main' (721a6485341076e8e96fe69dfe886a98791bd340) +- Resolve DOCOPS-1284 "Nim manage instance groups" 
(3094c807bbbe2d65c6ad2f14f12f276143f91c05) + +## 0.21.0 +2022-08-02 + +### Features + +- Merge branch 'DOCOPS-1314-Update-a-WebUI-step' into 'main' (20d4b41e30c88c0cdf54ae9c73c5dc9b34d31950) + +### Fixes + +- updated per AC (4bf4dbbe27ba68b88f30c367d05492b596a38a5a) +- updated UI step (c191ee7b99ce446863c6d124c7a92d011eee6450) + +## 0.20.0 +2022-07-29 + +### Features + +- fix: redirect for moved migration guide (f3a6398da2e05b35c3b53706c799837359d2899f) +- Merge branch 'fix-nim-install-guide-redirect' into 'main' (79123fc688c728f1951a2019c50eac00257d96a4) + +## 0.19.0 +2022-07-29 + +### Features + +- fix: reorged NIM 2 IA for migration topic (de4e272e4865109c964b86439879ee35241d32a9) +- Merge branch 'cp-53b07d53-move-nim-migration-guide' into 'main' (3dda6b785b96282b5a24dd98eee708d6da44bcef) + +## 0.18.0 +2022-07-28 + +### Features + +- Merge branch 'docops-1312-1313' into 'main' (ef4300ffccbaf6a8bfcc50ba78bcbca04e18edd6) + +### Fixes + +- DOCOPS-1312 & DOCOPS-1313 (aabe44277f43a3b0922e1b985a005d8489c9cd3e) + +## 0.17.0 +2022-07-28 + +### Features + +- Merge branch 'NMS-34693_oidc_update' into 'main' (0956399077d1b421e9f64a3b74fa2caa15b86e93) + +## 0.16.0 +2022-07-27 + +### Features + +- Merge branch 'docops-1309-nim-heml-install' into 'main' (d8722eeccd1356c155de2efd463bcc834b5b3a83) + +### Fixes + +- NIM helm install works only on NIM 2.1.0 (40261b0e296ff263c11c327bcdd10afa2d73492e) + +## 0.15.0 +2022-07-27 + +### Features + +- Merge branch 'update-readme-branching' into 'main' (5114cb88797e291cd02987ae08d780e10551761f) + +## 0.14.0 +2022-07-25 + +### Features + +- Merge branch 'docops-1294' into 'main' (ccbce8ec922e7dd70bc07a41ffa1324c17a63c5c) + +### Fixes + +- DOCOPS-1294 (9f8a8971503cb07c604019f16fd366ac58ddd928) + +## 0.13.0 +2022-07-25 + +### Features + +- Merge branch 'docops-1292' into 'main' (d1c51ac8626fb8022b697a4f8c7fd434d7a7f0e9) + +### Fixes + +- DOCOPS-1292 (07a625439448565409477a0c1cf04710bf1d8edb) + +## 0.12.0 +2022-07-25 + +### Features + +- 
Merge branch 'remove-nim-1.x-screenshots' into 'main' (ca5590fb691a1232ff76150dbbdef2b2b7d0c0ba) + +### Fixes + +- removes screenshots from NIM 1.x docs (b58c23cc7b245de7079b87c73d17410cc3783c09) + +## 0.11.0 +2022-07-25 + +### Features + +- Merge branch 'docops-1291-restore-nim-1.x-docs' into 'main' (62811b2522901deb29e41fd6cf245b57bccd7767) + +### Fixes + +- restores NIM 1.x docs (0e5aa34a234831a9ebdb70a5d966634985be13dc) + +## 0.10.0 +2022-07-21 + +### Features + +- Merge branch 'fix-broken-link-nim-known-issues' into 'main' (3e5155883c9f69a0df45bc1df5edcbe40778b06f) + +### Fixes + +- broken link to upgrade guide in NIM known issues (6f6154545bce7d72aaf165d12df6d585c22fff23) + +## 0.9.0 +2022-07-21 + +### Features + +- Merge branch 'NMS-36040-known-issue' into 'main' (22e1ad58599439b422cbf6d68fcf5171c787e2ab) + +### Fixes + +- added known issue for nms-36040 (0e558d6b8280a28c316d094c52801662465a71d2) + +## 0.8.0 +2022-07-21 + +### Features + +- Merge branch 'DOCOPS-1154-nim-2.3.1-RNs' into 'main' (4fde4f4ce8946e4aedfe2d866e76fb8304291c5e) + +### Fixes + +- add NIM 2.3.1 RNs (29e769c58401fdfd95d2f16c45b19a83ec5b4358) + +## 0.7.0 +2022-07-19 + +### Features + +- ACM GA v1.0.0 (c58ec5de2e9fb25a022934990b79387a2ccd36dd) +- Merge branch 'ACM-GA-1.0-Docs-Release-Branch' into 'main' (78941f06f66e4da4115743a8e2dbd5553c523da0) + +## 0.4.0 +2022-07-05 + +### Features + +- Merge branch 'NIM-2.3-doc-release-branch' into 'main' (6597f12b9013c16e8e7c81e108074bd4baf207f2) + +## 0.3.0 +2022-06-10 + +### Features + +- Merge branch 'bloviate-product-names' into 'main' (7499e8e8f37a842d5b0cf552aef314cb0128e389) + +### Fixes + +- added full product names to doc IA (60f7c694c021a36770e812f4620f5c7ae6ad05e4) + +## 0.2.0 +2022-06-06 + +### Features + +- Merge branch 'docops-857-port-over-platform-docs' into 'main' (686e852a7d0ceb79a1194bc1d2a4eb9516e0bd88) + +## 0.1.0 +2022-06-02 + +### Features + +- repo CI, Hugo, and Netlify setup (b0244a439205d89954070c78ed976b3c9aa1e2d3) +- Merge 
branch 'setup-gitlab-ci' into 'main' (1b37c69a9d997b8c2b8b944fc8bf7d1622883a12) + +### Fixes + +- initial commit (22bba50a14debf38dd8e607823bc333146cc3098) +- ACM Docs folder structure (acd7412074a5e1b50fc7d903cc572db9c85409cb) +- Updated the structure as per the IA (bd1c6d27e68dd2a0873e9cc444260bbec980b636) diff --git a/content/nms/CODEOWNERS b/content/nms/CODEOWNERS new file mode 100644 index 000000000..dcd1d84ff --- /dev/null +++ b/content/nms/CODEOWNERS @@ -0,0 +1,27 @@ +# All files +[Tech Writers][1] +/ @tmartin-nginx @j.cahilltorre @a.dooley @jputrino + +[Admin Guides][1] +content/admin-guides/ @j.barrios @forsyth1 @jclopper @ssharma13 @n.mcelwain @jputrino @noumba @i.ukpe @ch.adams @mvernik @tmartin-nginx + +[Tech Specs][1] +content/overview/tech-specs.md @j.barrios @forsyth1 @jclopper @n.mcelwain @jputrino @tmartin-nginx + +[NIM App Sec][1] +content/nim/nginx-app-protect/ @noumba @j.barrios @i.ukpe @jputrino @tmartin-nginx + +[NIM][1] +content/nim/ @forsyth1 @jclopper @j.barrios @ssharma13 @jputrino @tmartin-nginx + +[ACM][1] +content/acm/ @n.mcelwain @jputrino @tmartin-nginx + +[Security Monitoring][1] +content/security/ @noumba @j.barrios @i.ukpe @jputrino @tmartin-nginx + +[Agent][1] +content/nginx-agent/ @ch.adams @mvernik @jputrino @tmartin-nginx + +[Support][1] +content/support/ @j.barrios @forsyth1 @jclopper @ssharma13 @n.mcelwain @jputrino @tmartin-nginx diff --git a/content/nms/_index.md b/content/nms/_index.md new file mode 100644 index 000000000..6614e2d35 --- /dev/null +++ b/content/nms/_index.md @@ -0,0 +1,9 @@ +--- +title: F5 NGINX Management Suite +description: A set of tools that enable enterprise scalability, security, observability, and governance. 
+url: /nginx-management-suite/ +layout: "nms-eos-list" +cascade: + logo: "NGINX-Management-Suite-product-icon.svg" +--- + diff --git a/content/nms/about.md b/content/nms/about.md new file mode 100644 index 000000000..ed2dd5df5 --- /dev/null +++ b/content/nms/about.md @@ -0,0 +1,55 @@ +--- +title: About +description: F5 NGINX Management Suite brings together advanced features into a single + platform, making it easy for organizations to configure, monitor, and troubleshoot + NGINX instances; manage and govern APIs; optimize load balancing for apps; and enhance + overall security. +weight: 10 +toc: true +type: concept +docs: DOCS-905 +--- + +Explore the topics below to find out what the F5 NGINX Management Suite modules have to offer. + +--- + +## Instance Manager {#instance-manager} + +[NGINX Instance Manager]({{< relref "/nim/">}}) allows you to configure, scale, and manage NGINX Open Source and NGINX Plus instances at scale. Instance Manager provides a [REST API]({{< relref "/nim/fundamentals/api-overview">}}) and web-based graphical user interface (GUI) for managing NGINX instances across multiple servers, making it easier to configure, monitor, and troubleshoot NGINX deployments. + +Instance Manager can be used to manage instances running on-premises, in the cloud, or in hybrid environments, and it supports the deployment of NGINX instances on a variety of operating systems and container platforms. + +Instance Manager also includes advanced features like health checks, rolling updates, and configuration backups, which help to ensure the reliability and security of NGINX deployments. 
+ +### Instance Manager Key Features + +Instance Manager provides the following features: + +- [View metrics and information]({{< relref "/nim/monitoring/view-events-metrics">}}) about data plane host systems and NGINX instances +- [View, edit, and publish NGINX configurations]({{< relref "/nim/nginx-configs/publish-configs">}}) +- [Save NGINX configurations]({{< relref "/nim/nginx-configs/publish-configs#stage-config">}}) for future deployment +- [Analyze NGINX configurations]({{< relref "/nim/nginx-configs/publish-configs">}}) for syntactic errors before publishing them +- [Scan the network]({{< relref "/nim/nginx-instances/scan-instances#scan-ui">}}) to find unmanaged NGINX instances. +- [Manage certificates]({{< relref "/nim/nginx-instances/manage-certificates">}}) +- [Create users, roles, and role permissions]({{< relref "/nim/admin-guide/rbac/overview-rbac">}}) for role-based access control + +--- + +## Security Monitoring {#security-monitoring} + +[Security Monitoring]({{< relref "/nms/security/">}}) allows you to monitor NGINX App Protect WAF with analytics dashboards and security log details to get protection insights for analyzing possible threats or areas for tuning policies. + +### Security Monitoring Key Features + +The Security Monitoring module provides the following features: + +- Informative dashboards that provide valuable protection insights +- In-depth security log details to help with analyzing possible threats and making policy decisions + +--- + +## What's Next? 
+ +- [Review the Technical Specifications]({{< relref "/nim/fundamentals/tech-specs.md">}}) +- [Install NGINX Management Suite]({{< relref "/nim/deploy/_index.md">}}) diff --git a/content/nms/acm/_index.md b/content/nms/acm/_index.md new file mode 100644 index 000000000..cc2f08bb3 --- /dev/null +++ b/content/nms/acm/_index.md @@ -0,0 +1,9 @@ +--- +title: API Connectivity Manager +weight: 500 +aliases: + - /nginx-api-connectivity-manager/ +url: /nginx-management-suite/acm/ +cascade: + type: "acm-eos" +--- diff --git a/content/nms/acm/about/_index.md b/content/nms/acm/about/_index.md new file mode 100644 index 000000000..6bef60e8d --- /dev/null +++ b/content/nms/acm/about/_index.md @@ -0,0 +1,6 @@ +--- +description: "Learn about API Connectivity Manager" +title: About +weight: 100 +url: /nginx-management-suite/acm/about/ +--- diff --git a/content/nms/acm/about/api-overview.md b/content/nms/acm/about/api-overview.md new file mode 100644 index 000000000..cbdbf65f3 --- /dev/null +++ b/content/nms/acm/about/api-overview.md @@ -0,0 +1,149 @@ +--- +description: This topic gives an overview of the F5 NGINX Management Suite API Connectivity + Manager API. +docs: DOCS-929 +tags: +- docs +title: API Overview +toc: true +weight: 300 +--- + +{{< shortversions "1.1.0" "latest" "acmvers" >}} + +## Introduction + +API Connectivity Manager provides a [RESTful](https://en.wikipedia.org/wiki/Representational_state_transfer) API that uses standard authentication methods, HTTP response codes, and verbs. + +## Object Model + +You can use the API Connectivity Manager API to connect, secure, and govern your APIs. In addition, API Connectivity Manager lets you separate infrastructure lifecycle management from the API lifecycle, giving your IT/Ops teams and application developers the ability to work independently. 
+ +The API Connectivity Manager API provides the following features: + +- Create and manage isolated Workspaces for business units, development teams, and so on, so each team can develop and deploy at its own pace without affecting other teams. +- Create and manage API infrastructure in isolated workspaces. +- Enforce uniform security policies across all workspaces by applying global policies. +- Create Developer Portals that align with your brand, with custom color themes, logos, and favicons. +- Onboard your APIs to an API Gateway and publish your API documentation to the Developer Portal. +- Let teams apply policies to their API proxies to provide custom quality of service for individual applications. +- Onboard API documentation by uploading an OpenAPI spec. +- Publish your API docs to a Dev Portal while keeping your API's backend service private. +- Let users issue API keys or basic authentication credentials for access to your API. +- Send API calls by using the Developer Portal's API Reference documentation. + +## API Reference Documentation + +You can view the API Connectivity Manager API Reference documentation in the F5 NGINX Management Suite user interface. +To access the API Docs, take the steps below: + +1. Log in to the NGINX Management Suite user interface. +2. From the Launchpad, select the **Docs** card. +3. Select **API Connectivity Manager** from the **Docs** list in the sidebar. The API Connectivity Manager API Reference documentation will then display. + +## Authentication + +API Connectivity Manager supports authentication by using basic authentication or a JSON Web Token (JWT). You can get a JWT by logging in with an OpenID Connect (OIDC) Identity Provider. + +For more information about the available authentication options for NGINX Management Suite, refer to [Set Up Authentication]({{< relref "/nim/admin-guide/authentication/basic-auth/set-up-basic-authentication.md" >}}). 
+ +### Basic Authentication + +You can make API requests with basic auth by sending the base64-encoded credentials as a "Basic" token in the "Authorization" request header, as shown in the example below. + +```shell +curl -X GET "https:///api/acm//workspaces/infrastructure" -H "Authorization: Basic YWRtaW..." +``` + +{{}}Even when encoded, basic authentication is not secure. The use of basic auth is not recommended for production environments.{{}} + +### JSON Web Token + +If your organization is using OIDC, you will be prompted to log in with your Identity Provider the first time you attempt to reach an API. After authenticating, you can request a JWT to use in subsequent API calls. + +{{}} +
    + +- The means of requesting a token varies according to the Identity Provider; if you're not sure which provider your organization uses, check with your system administrator or technical support team. +- Automated CI/CD workflows are not supported when using OIDC authentication. +{{
    }} + +The JWT should be sent as a "Bearer" token in the "Authorization" request header, as shown in the example below. + +```shell +curl -X GET "https:///api/acm//workspaces/infrastructure" -H "Authorization: Bearer " +``` + +## Usage + +### Command-line + +You can use tools such as `curl` or [Postman](https://www.postman.com) to interact with the API Connectivity Manager REST API. +The API URL follows the format `https:///api/acm/`. + +{{}}When making API calls by using `curl`, Postman, or any other tool, you will need to provide your authentication information with each call. {{}} + +### User Interface + +You can also use the "Try it Out" function in the API Reference docs to send a call to the API Connectivity Manager API. You do not have to provide a means of authentication when sending API calls via the API Documentation UI because you are already logged in to the NGINX Management Suite platform. + +To do so, take the steps below: + +1. Select the endpoint and action that you want to send. For example: `POST /infrastructure/workspaces`. +2. Select the **Try it Out** button. +3. If the endpoint accepts parameters, replace the placeholder examples in the request body with your desired values. +4. Select the **Execute** button to send the request. +5. When the request completes, the response appears in the UI. + +## Errors and Response Codes + +API Connectivity Manager uses standard HTTP response codes to indicate whether an API request succeeds or fails. Codes in the `2xx` range mean the request succeeded. Codes in the `400` range mean the request failed due to the reason(s) indicated in the response message. Common reasons for `4xx` responses are: + +- requests where required information is missing; +- lack of or incorrect authentication credentials; and +- requests that refer to resources that do not exist or are in use by other resources. 
+ +**HTTP Status Codes** +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Response Code | Meaning | +|---------------|---------| +| 200 | Success: The request was received. | +| 201 | Success: Created the requested resource. | +| 202 | Success: The request was accepted and configuration is in process. | +| 204 | Success: Deleted the requested resource. | +| 400 | Bad Request: Required information is missing or incorrectly formatted. | +| 401 | Unauthorized: You are not logged in or do not have permission to access the requested resource. | +| 404 | Not found: The requested resource does not exist. | +| 409 | Conflict: The requested resource already exists or is referenced by another resource. | + +{{< /bootstrap-table >}} + +## Encoding + +All API Connectivity Manager API endpoints expect and return JSON-formatted data by default. +All JSON-formatted data is expected to be encoded using UTF-8. If you do not specify a media type when sending an API call, then the API defaults to `application/json`. + +## Pagination + +Top-level API Connectivity Manager API endpoints support fetching information about multiple resources ("lists"). Such requests may return large data sets (for example, `GET /services/workspaces/{workspaceName}/proxies` and `GET /services/workspaces/{workspaceName}/proxies/{proxyName}/jobs`). For these endpoints, you can define the size of the data set returned for each call and navigate amongst the pages of data when sending subsequent calls. + +### Parameters + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Name| Format| Type| Description | Default value| +|:---|---|---|--------|--------| +|`page`|integer|query| page number | `1`| +|`pageToken`|string|query|Transactional token used for pagination.
    The token ensures consistency of the query results across requests for various pages of data. It provides a snapshot of the database contents from the time at which the query was received.
    If `pageToken` is not provided with a page request, a token is automatically generated and will be returned in the response metadata. You should include the token in subsequent requests for pages in the query results.

    Sending a query without a pageToken refreshes the query results.|N/A | +|`pageSize`|integer|query|Defines the number of returned items per page.

    The maximum value is 100. If the value is greater, it is automatically scaled down to 100.

    If `pageSize=0`, pagination is disabled and the full dataset will be returned in the response.
    The response size is limited to 10,000 results. If the number of results exceeds 10,000 a warning is returned.|`100`| + +{{< /bootstrap-table >}} + +## Versioning + +Each major version of the API Connectivity Manager API is backward-compatible with the previous releases in that version. +The introduction of backward-incompatible changes to the API Connectivity Manager API constitutes a major version change. +This will be represented in the `` section of the API URI. + +For example, to use a v2 API, you would send requests to `https:///api/acm/v2`. diff --git a/content/nms/acm/about/architecture.md b/content/nms/acm/about/architecture.md new file mode 100644 index 000000000..17ba09883 --- /dev/null +++ b/content/nms/acm/about/architecture.md @@ -0,0 +1,92 @@ +--- +description: Learn about the F5 NGINX Management Suite API Connectivity Manager architecture. +docs: DOCS-892 +doctypes: +- concept +tags: +- docs +title: Architecture Overview +toc: true +weight: 400 +--- + +{{< shortversions "1.0.0" "latest" "acmvers" >}} + +## Overview + +This topic provides an overview of the API Connectivity Manager architecture and personas. + +--- + +## Terminology + +This document introduces the following concepts. + +### Topology + +{{}} + +|
    Term
    | Description | +|-------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Management Plane | The management plane is an abstraction layer used to configure, monitor, and manage the layers of a network stack. API Connectivity Manager, a part of the management plane, establishes guardrails and configures rules for the data plane. | +| Data Plane | [F5 NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) instances in the traffic path that act as load balancers, API gateways, firewalls, ingress controllers, and caching systems. | +| Proxy Cluster |

    NGINX is widely known as a reverse proxy, and a Proxy Cluster is a set of one or more NGINX Plus servers working together. A proxy cluster keeps configurations in sync across all instances and maintains data consistency by sharing the runtime state.

    Examples:

    • **API Gateway Cluster**: A cluster of one or more NGINX Plus instances acting as a single proxy for API requests.
    • **Dev Portal Cluster**: A cluster of one or more NGINX Plus instances configured to act as Developer Portals. Developer portals provide a framework for hosting API documentation, provisioning access keys, and managing approval workflows. In addition, you can test your APIs with the "Try It Out" feature.
    | + +{{
    }} + +### Platform Services + +API Connectivity Manager uses [NATS](https://nats.io) to communicate with the NGINX Management Suite platform services. + +{{< include "nms/services/platform-services.md" >}} + +--- + +## Architecture + +The following diagram shows how API Connectivity Manager's components are organized and interact. + +{{< note >}}API Connectivity Manager takes an API-first approach: commands issued using the web interface are processed using the API Connectivity Manager REST API. +{{}} + +{{API Connectivity Manager architecture}} + +--- + +## Personas + +### Infrastructure Admin + +Infrastructure Admins, interacting primarily with the management plane, manage the infrastructure for hosts. + +#### Routine tasks + +- Configure SSO +- Provision the infrastructure +- Configure domain names +- Manage data plane hosts +- Manage certificates +- Enforce global/enterprise policies + +### API Owner + +API Owners oversee the API lifecycle, which they can maintain using a CI/CD pipeline. + +The API Owner relies on the Infrastructure Admin to complete the initial configuration before beginning work. + +#### Routine tasks + +- Set up an API team +- On-board an API +- Configure policies to meet *Quality of Service (QoS)* commitments +- Select the API Gateway cluster for publishing an API +- Select the Dev Portal cluster for publishing API documentation + +### Application Owner + +Application Owners develop new digital experiences. + +#### Routine tasks + +- Learn about APIs and API contracts by reading the documentation on the Dev Portal. +- Test APIs using the "Try It Out" feature in the on-board documentation. 
diff --git a/content/nms/acm/about/images/HighLevelComponents.png b/content/nms/acm/about/images/HighLevelComponents.png new file mode 100644 index 000000000..0a93d42f8 Binary files /dev/null and b/content/nms/acm/about/images/HighLevelComponents.png differ diff --git a/content/nms/acm/about/introduction.md b/content/nms/acm/about/introduction.md new file mode 100644 index 000000000..f9d8c79d9 --- /dev/null +++ b/content/nms/acm/about/introduction.md @@ -0,0 +1,23 @@ +--- +docs: DOCS-1385 +title: Introduction +weight: 100 +--- + +[API Connectivity Manager]({{< relref "/nms/acm/">}}) enables self-service and automation of API delivery. API Connectivity Manager allows you to deploy, configure, secure, monitor, and govern API gateways at scale. + +The API Connectivity Manager module provides a [REST API]({{< relref "/nms/acm/about/api-overview">}}) that uses standard authentication methods, HTTP response codes, and verbs. + +You can use the API Connectivity Manager API to connect, secure, and govern your APIs. In addition, API Connectivity Manager lets you separate infrastructure lifecycle management from the API lifecycle, allowing your IT/Ops teams and application developers to work independently. 
+ +## API Connectivity Manager Key Features + +The API Connectivity Manager module provides the following features: + +- [Create and manage isolated workspaces]({{< relref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#create-a-workspace">}}) +- [Create and manage API infrastructure]({{< relref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#add-an-environment">}}) in isolated workspaces +- [Enforce uniform security policies]({{< relref "/nms/acm/how-to/policies/tls-policies.md" >}}) across workspaces using global policies +- [Create Developer Portals]({{< relref "/nms/acm/how-to/infrastructure/publish-developer-portal.md" >}}) with custom color themes, logos, and favicons +- [Onboard APIs to an API Gateway]({{< relref "/nms/acm/how-to/infrastructure/publish-developer-portal.md#add-an-api-doc" >}}) and [publish API documentation]({{< relref "/nms/acm/how-to/infrastructure/publish-developer-portal.md#publish-the-api-documentation-and-api-proxy" >}}) to the Developer Portal +- [Apply policies to API proxies]({{< relref "/nms/acm/how-to/policies/manage-policies.md#configure-proxy-policies" >}}) to provide custom quality of service for individual applications +- [Issue API keys]({{< relref "/nms/acm/how-to/infrastructure/enable-sso-devportal" >}}) or basic authentication credentials for access to the API diff --git a/content/nms/acm/about/policies-overview.md b/content/nms/acm/about/policies-overview.md new file mode 100644 index 000000000..1c93b545b --- /dev/null +++ b/content/nms/acm/about/policies-overview.md @@ -0,0 +1,46 @@ +--- +description: Learn about the policies available for use in F5 NGINX Management Suite + API Connectivity Manager. +docs: DOCS-932 +doctypes: +- conceptual +tags: +- docs +title: Available Policies +toc: true +weight: 500 +--- + +{{< shortversions "1.1.0" "latest" "acmvers" >}} + +## Overview + +This page gives an overview of the available policies in API Connectivity Manager. 
Policies allow you to protect and secure your services and their data. + +--- + +## Policy Types + +There are two types of policies for API Connectivity Manager: + +{{}} + +| Policy Type | Description | +|------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------| +| [Global policies](#global-policies) | Global policies, typically managed by an Enterprise Security or Support team, are onboarded as a one-time task when onboarding an API. Global policies are enforced for all of the APIs in an environment. | +| [API proxy policies](#api-proxy-policies) | When onboarding APIs to API Connectivity Manager, API owners define API-level policies to enforce security and behavior characteristics for their APIs. | + +{{}} + +### Global Policies {#global-policies} + +Global policies are enforced for all of the APIs in an environment. Global policies are commonly prescribed by an Enterprise Security or Support team; the Security or Support team decides if API owners can edit the global policies. + +{{< include "acm/about/global-policies.md" >}} + +### API Proxy Policies {#api-proxy-policies} + +Apply API gateway proxy policies to enhance the experience of your APIs. + +{{< include "acm/about/api-proxy-policies.md" >}} + diff --git a/content/nms/acm/about/rbac-roles.md b/content/nms/acm/about/rbac-roles.md new file mode 100644 index 000000000..db97de627 --- /dev/null +++ b/content/nms/acm/about/rbac-roles.md @@ -0,0 +1,28 @@ +--- +title: "RBAC roles" +weight: 600 +docs: "DOCS-1667" +--- + +## Built-In Roles + +### API Connectivity Manager + +API Connectivity Manager comes pre-configured with roles suitable for API Owners and Infrastructure Admins. + +- **API Owner**: The individuals or teams who are responsible for designing, creating, and maintaining APIs. 
+- **Infrastructure Admin**: Infrastructure Administrators ensure uniform governance across an organization’s infrastructure by setting policies at the infrastructure level, enabling teams to build APIs without interruption while adhering to the organization’s standards. + +#### ACM API Owner {#acm-api-owner} + +{{< include "acm/rbac/api-owner-role.md" >}} + +{{}}The tutorial [Set Up RBAC for API Owners]({{< relref "/nms/acm/tutorials/rbac-api-owners.md">}}) provides an example of how to configure RBAC for API owners.{{}} + +
    + +#### ACM Infra Admin {#acm-infra-admin} + +{{< include "acm/rbac/infra-admin-role.md" >}} + +{{}}The tutorial [Set Up RBAC for Infra Admins]({{< relref "/nms/acm/tutorials/rbac-infra-admins.md">}}) provides an example of how to configure RBAC for Infrastructure Administrators.{{}} diff --git a/content/nms/acm/about/technical-specifications.md b/content/nms/acm/about/technical-specifications.md new file mode 100644 index 000000000..0585a3605 --- /dev/null +++ b/content/nms/acm/about/technical-specifications.md @@ -0,0 +1,49 @@ +--- +docs: DOCS-1470 +title: Technical Specifications +weight: 200 +--- + +### Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +{{< important >}} If you're [installing API Connectivity Manager in an offline environment]({{< relref "/nim/disconnected/offline-install-guide.md#install-acm-offline" >}}) and the minimum required version of Instance Manager is not installed, the API Connectivity Manager installer will exit. You'll need to [install Instance Manager manually]({{< relref "/nim/disconnected/offline-install-guide.md#install-nim-offline" >}}) before installing API Connectivity Manager.{{< /important >}} + +### API Connectivity Manager Supported NGINX Versions {#acm-supported-nginx} + +{{< include "tech-specs/acm-supported-nginx.md" >}} + +### Developer Portal Supported Distributions {#dev-portal-supported-distributions} + +{{< include "tech-specs/acm-dev-portal-supported-distros.md" >}} + +--- + +## Supported Linux Distributions + +{{< call-out "note" "API Connectivity Manager" >}}Make sure you review the [supported distributions for the Developer Portal](#dev-portal-supported-distributions) host before installing the API Connectivity Manager module. There is a slight difference between the supported distributions in that list and this one. 
+{{< /call-out >}} + +API Connectivity Manager supports the following Linux distributions: + + +{{}} + +| Distribution | Version | Architecture | Instance Manager | API Connectivity Manager | +|---------------------------------------------|----------------------------------------------------------------------------------------------------------------|----------------------------|------------------------------------------------------|------------------------------------------------------| +| Amazon Linux | 2 LTS | x86_64 | Supported | Supported | +| CentOS | 7.4 and later in the 7.x family | x86_64 | Supported | Supported | +| Debian | 11
    12 | x86_64
    x86_64 | Supported
    Supported on 2.13+ | Supported
    Not supported | +| Oracle Linux | 7.4 and later in the 7.x family
    8.0 and later in the 8.0.x family | x86_64
    x86_64 | Supported
    Supported on 2.6.0+ | Supported
    Supported on 1.3.0+ | +| RHEL | 7.4 and later in the 7.x family
    8.x and later in the 8.x family
    9.x and later in the 9.x family | x86_64
    x86_64
    x86_64 | Supported
    Supported
    Supported on 2.6.0+ | Supported
    Supported
    Supported on 1.3.0+ | +| Ubuntu | 20.04
    22.04 | x86_64
    x86_64 | Supported
    Supported on 2.3.0+ | Supported
    Supported | + +{{
    }} + + + + +## Supported NGINX Versions + +{{< include "tech-specs/acm-supported-nginx.md" >}} diff --git a/content/nms/acm/getting-started/_index.md b/content/nms/acm/getting-started/_index.md new file mode 100644 index 000000000..769839119 --- /dev/null +++ b/content/nms/acm/getting-started/_index.md @@ -0,0 +1,7 @@ +--- +description: "Learn how to get up and running with F5 NGINX Management Suite API Connectivity Manager." +title: Getting Started Guides +weight: 400 +url: /nginx-management-suite/acm/getting-started/ +--- + diff --git a/content/nms/acm/getting-started/add-api-gateway.md b/content/nms/acm/getting-started/add-api-gateway.md new file mode 100644 index 000000000..0b9398576 --- /dev/null +++ b/content/nms/acm/getting-started/add-api-gateway.md @@ -0,0 +1,229 @@ +--- +description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to configure + an API Gateway. +docs: DOCS-921 +tags: +- docs +title: Set Up an API Gateway Environment +toc: true +weight: 100 +--- + +{{< shortversions "1.1.0" "latest" "acmvers" >}} + +## Overview + +In API Connectivity Manager, an API Gateway is a proxy cluster that contains one or more NGINX data plane instances. +These clusters are managed under **Infrastructure Workspaces** and are part of **Environments**. + +### Before You Begin + +Before proceeding with this guide, you should familiarize yourself with the [API Overview]({{< relref "/nms/acm/about/api-overview" >}}) and the [Get Started]({{< relref "/nms/acm/getting-started/overview" >}}) section of this series. + +## Add an Infrastructure Workspace + +First, you'll need to create an Infrastructure Workspace. +This is a logical grouping that allows for separation between business units or teams. 
+
+
+{{}}
+
+| Method | Endpoint |
+|-------------|----------|
+| POST| `/infrastructure/workspaces`|
+
+{{}}
+
+
+```json
+{
+  "name": "{{infraWorkspaceName}}",
+  "metadata": {
+    "description": "App Development Workspace"
+  },
+  "contactDetails": {
+    "adminEmail": "I.M.Devs@example.com",
+    "adminName": "I.M. Devs",
+    "adminPhone": "555 321 1234"
+  }
+}
+```
+
+## Add an Environment
+
+Next, add an Environment.
+
+Environments contain **API Gateways** and **Developer Portals**.
+Use the appropriate example below to deploy an API Gateway with either HTTP, HTTP2, or HTTPS.
+
+### HTTP
+
+> {{< fa "lightbulb" >}} Use this example to get up and running quickly in a demo environment.
+
+
+{{}}
+
+| Method | Endpoint |
+|-------------|----------|
+| POST| `/infrastructure/workspaces/{{infraWorkspaceName}}/environments`|
+
+{{}}
+
+
+```json
+{
+  "name": "{{environmentName}}",
+  "functions": [
+    "API-GATEWAY"
+  ],
+  "proxies": [
+    {
+      "proxyClusterName": "{{instanceGroupName}}",
+      "hostnames": [
+        "{{environmentHostname}}"
+      ],
+      "runtime": "GATEWAY-PROXY"
+    }
+  ]
+}
+```
+
+### HTTPS
+
+To deploy a cluster that uses HTTPS for secure inbound communication, you'll add the **TLS Inbound** policy.
+Because this is done at the Infrastructure level, this is considered a "Global Policy".
+
+> {{< fa "lightbulb" >}} You need to provide a valid TLS server certificate and key in this API call.
+
+{{}}
+Need to add requirements for sending this info? Base64 encoding required?
+{{}}
+
+
+{{}}
+
+| Method | Endpoint |
+|-------------|----------|
+| POST| `/infrastructure/workspaces/{{infraWorkspaceName}}/environments`|
+
+{{}}
+
+
+```json
+{
+  "name": "{{environmentName}}",
+  "proxies": [
+    {
+      "proxyClusterName": "{{instanceGroupName}}",
+      "hostnames": [
+        "{{environmentHostname}}"
+      ],
+      "policies": {
+        "tls-inbound": [
+          {
+            "data": {
+              "serverCerts": [
+                {
+                  "key": "{{tls key}}",
+                  "cert": "{{tls cert}}"
+                }
+              ]
+            }
+          }
+        ]
+      }
+    }
+  ]
+}
+```
+
+### HTTP2
+
+To deploy a cluster that uses HTTP2 for secure inbound communication, you'll add the **TLS Inbound** policy.
+Because this is done at the Infrastructure level, this is considered a "Global Policy".
+
+> {{< fa "lightbulb" >}} You need to provide a valid TLS server certificate and key in this API call.
+
+
+{{}}
+
+| Method | Endpoint |
+|-------------|----------|
+| POST| `/infrastructure/workspaces/{{infraWorkspaceName}}/environments`|
+
+{{}}
+
+
+```json
+{
+  "name": "{{environmentName}}",
+  "proxies": [
+    {
+      "proxyClusterName": "{{instanceGroupName}}",
+      "listeners": [
+        {
+          "transportProtocol": "HTTP2",
+          "port": 443,
+          "tlsEnabled": true
+        }
+      ],
+      "hostnames": [
+        "{{environmentHostname}}"
+      ],
+      "policies": {
+        "tls-inbound": [
+          {
+            "data": {
+              "serverCerts": [
+                {
+                  "key": "{{tls key}}",
+                  "cert": "{{tls cert}}"
+                }
+              ]
+            }
+          }
+        ]
+      }
+    }
+  ]
+}
+```
+
+## Onboard F5 NGINX Plus Instances into the Cluster
+
+Take the steps in this section to install the NGINX Agent on the data plane instances to onboard them into the proxy cluster that you created in the [previous step](#add-an-environment).
+
+To do so, you need to interact directly with the NGINX Plus data plane hosts.
+
+- SSH access to the hosts and `sudo` permissions are required.
+- You can add up to three NGINX Plus data plane instances to the cluster.
+
+### Install NGINX Agent on the Data Plane Hosts {#onboard-nginx-plus}
+
+1. Use SSH to connect and log in to each of the NGINX Plus data plane hosts that you want to add to the API Gateway cluster.
+1. Run the onboarding command as root using cURL to download, install, configure, and start the NGINX Agent package.
+
+    - Replace `{{nms-fqdn}}` in the example command with the FQDN or IP address of your API Connectivity Manager management plane host.
+    - Make sure `-g {{clusterName}}` uses the name of your API Gateway cluster.
+
+    ```bash
+    curl --insecure https://{{nms-fqdn}}/install/nginx-agent > install.sh && \
+    sudo sh install.sh -g {{clusterName}} && sudo systemctl start nginx-agent
+    ```
+
+### Verify the Settings
+
+Try sending traffic to the hostname you configured for the API Gateway. Send a PUT request to the endpoint shown below to update the Environment.
+
+1. Send a GET request to the endpoint shown below to verify that the instances were added to the Clusters.
+
+
+{{}}
+
+| Method | Endpoint |
+|-------------|----------|
+| PUT | `/infrastructure/workspaces/{{infraWorkspaceName}}/environments/{{environmentName}}`|
+| GET | `/infrastructure/workspaces/{{infraWorkspaceName}}/environments/{{environmentName}}?includes=instances&includes=status`|
+
+{{}}
+
diff --git a/content/nms/acm/getting-started/add-devportal.md b/content/nms/acm/getting-started/add-devportal.md
new file mode 100644
index 000000000..66d1be96c
--- /dev/null
+++ b/content/nms/acm/getting-started/add-devportal.md
@@ -0,0 +1,178 @@
+---
+description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to configure
+  a Developer Portal.
+docs: DOCS-922
+tags:
+- docs
+title: Set Up a Developer Portal Environment
+toc: true
+weight: 200
+---
+
+{{< shortversions "1.1.0" "latest" "acmvers" >}}
+
+## Overview
+
+In API Connectivity Manager, a Developer Portal (or, "Dev Portal") is a cluster of F5 NGINX Plus data plane instances.
+These clusters are managed under **Infrastructure Workspaces** and are part of **Environments**.
+ +### Before You Begin + +You should complete the following Quick Start Guide(s) before proceeding with the steps in this guide: + +- [Configure an API Gateway]({{< relref "add-api-gateway" >}}) + +## Add a Developer Portal + +Complete the steps in this guide to add a Developer Portal to the Environment you created in the [previous guide]({{< relref "add-api-gateway" >}}). + +When a Developer Portal environment is created, the API Connectivity Manager configures +a virtual server through which the developer portal service and API Connectivity Manager communicate. By default, the hostname for this server is the +hostname that you provided for the Developer Portal cluster, prefixed with `acm.`. For example: `acm.dev-portal.io`. This virtual server listens on port 81. + +You will need to update your DNS resolver settings to ensure this hostname is resolvable. +The hostname and port for this server can be updated by selecting the **Edit Portal <-> API Connectivity Manager Connectivity** from the **Actions** menu for your desired developer portal. + +{{}} + +- Be sure to provide the IP address or FQDN of the host where you installed the Dev Portal packages as the `{{portalClusterHostname}}`. +- The Dev Portal must run on a dedicated host with the [`njs`](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/nginscript/) module installed. +{{}} + +Use the appropriate example below to deploy an HTTP or HTTPS Developer Portal. + +### HTTP + +> {{< fa "lightbulb" >}} Use this example to get up and running quickly in a demo environment. 
+ + +{{}} + +| Method | Endpoint | +|-------------|----------| +| POST| `/infrastructure/workspaces/{{infraWorkspaceName}}/environments`| + +{{}} + + +```json +{ + "name": "{{environmentName}}", + "functions": [ + "DEVPORTAL" + ], + "proxies": [ + { + "proxyClusterName": "{{portalInstanceGroupName}}", + "hostnames": [ + "{{portalClusterHostname}}" + ], + "runtime": "PORTAL-PROXY" + } + ] +} +``` + +### HTTPS + +To deploy a cluster that uses HTTPS for secure inbound communication, you'll add the **TLS Inbound** policy. +Because this is done at the Infrastructure level, this is considered a "Global Policy". + +> {{< fa "lightbulb" >}} You need to provide your TLS server certificate and key as base64-encoded strings in this API call. + + +{{}} + +| Method | Endpoint | +|-------------|----------| +| POST| `/infrastructure/workspaces/{{infraWorkspaceName}}/environments`| + +{{}} + + +```json +{ + "name": "{{environmentName}}", + "functions": [ + "DEVPORTAL" + ], + "proxies": [ + { + "proxyClusterName": "{{portalInstanceGroupName}}", + "hostnames": [ + "{{portalClusterHostname}}" + ], + "runtime": "PORTAL-PROXY", + "policies": { + "tls-inbound": [ + { + "data": { + "serverCerts": [ + { + "key": "{{tls key}}", + "cert": "{{tls cert}}" + } + ] + } + } + ] + } + } + ] +} +``` + +## Onboard an NGINX Plus Instance into the Cluster + +Take the steps in this section to install the NGINX Agent on the data plane instance where you installed the Developer Portal packages. +This onboards the host into the proxy cluster that you created in the [previous step](#add-an-environment). + +To do so, you'll need to interact directly with the host. +SSH access to the host and `sudo` permissions are required. + +### Install NGINX Agent on the Data Plane Host + +1. Use SSH to connect and log in to the Dev Portal host. +1. Run the onboarding command as root to download, install, configure, and start the NGINX Agent package. 
+ + - Replace `{{nms-fqdn}}` in the example command with the FQDN or IP address of your Dev Portal host. + - Replace `{{clusterName}}` in the example command with the name of your Developer Portal cluster. + + ```bash + curl --insecure https://{{nms-fqdn}}/install/nginx-agent > install.sh && \ + sudo sh install.sh -g {{clusterName}} && sudo systemctl start nginx-agent + ``` + +### Update the DNS Record + +The NGINX Management Suite management plane host uses the Developer Portal's hostname to communicate with the Dev Portal. +You’ll need to update your DNS resolver settings with the Developer Portal's internal hostname. + +> {{< fa "lightbulb" >}} The internal hostname is the hostname that you provided for the Developer Portal, prefixed with `acm.`. +> For example: `acm.dev-portal.io` + +Next, open the Developer Portal in a browser window and make sure the portal loads. + +## Customize the Developer Portal + +In this step, you'll apply a set of customizations to the Developer Portal. +Because these settings are applied at the Infrastructure level, they are considered "global", meaning they apply to each Dev Portal Proxy that you associate with the cluster. + +{{}}Refer to [Customize the Developer Portal]({{< relref "/nms/acm/how-to/infrastructure/customize-devportal.md" >}}) to learn more about the available customization options and how to customize a Dev Portal via the API Connectivity Manager user interface.{{}} + + +{{}} + +| Method | Endpoint | +|-------------|----------| +| PUT | `/infrastructure/workspaces/{{infraWorkspaceName}}/devportals/{{devPortalName}}`| + +{{}} + + +Note that many fields in the example JSON payload -- including the logo image and Markdown documents -- are base64-encoded. + +**Example JSON payload**: {{< fa "download" >}} {{< link "/nms/acm/getting-started/examples/customize-devportal.json" "customize-devportal.json" >}} + +Before you move on to the next guide, open the Dev Portal in your browser to view the changes. 
+You should see the default Dev Portal replaced by the custom settings. diff --git a/content/nms/acm/getting-started/add-sso-integration.md b/content/nms/acm/getting-started/add-sso-integration.md new file mode 100644 index 000000000..4d6e3deb2 --- /dev/null +++ b/content/nms/acm/getting-started/add-sso-integration.md @@ -0,0 +1,3 @@ +--- +draft: true +--- diff --git a/content/nms/acm/getting-started/examples/customize-devportal.json b/content/nms/acm/getting-started/examples/customize-devportal.json new file mode 100644 index 000000000..e87121aaa --- /dev/null +++ b/content/nms/acm/getting-started/examples/customize-devportal.json @@ -0,0 +1,144 @@ +{ + "brandName": "Configure Header", + "images": { + "favicon": "data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHhtbG5zOnhsaW5rPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5L3hsaW5rIiB4PSIwcHgiIHk9IjBweCIgdmlld0JveD0iMCAwIDIwOC4wIDI1Mi45IiB3aWR0aD0iMzIiIGhlaWdodD0iMzIiIGZpbGw9ImN1cnJlbnRDb2xvciI+PHBhdGggZD0iTTE5OC41LDUzLjFMMTA5LjcsMS45Yy00LjMtMi41LTkuNy0yLjUtMTQsMEw3LDUzLjFjLTQuMywyLjUtNyw3LjEtNywxMi4xdjEwMi41YzAsNSwyLjcsOS42LDcsMTIuMUw5NS43LDIzMWM0LjMsMi41LDkuNywyLjUsMTQsMGw4OC43LTUxLjJjNC4zLTIuNSw3LTcuMSw3LTEyLjFWNjUuMkMyMDUuNSw2MC4yLDIwMi44LDU1LjYsMTk4LjUsNTMuMXogTTE1NC4xLDE1NS45YzAsNi4xLTUuNCwxMS4xLTEzLDExLjFjLTUuNSwwLTExLjctMi4yLTE1LjYtN0w3NC4zLDk4Ljd2NTcuMmMwLDYuMS01LDExLjEtMTEuMSwxMS4xaC0wLjZjLTYuMSwwLTExLjEtNS0xMS4xLTExLjFWNzcuMWMwLTYuMSw1LjQtMTEuMSwxMy0xMS4xYzUuNCwwLDExLjcsMi4yLDE1LjYsN2w1MS4yLDYxLjNWNzcuMWMwLTYuMSw1LTExLjEsMTEuMS0xMS4xaDAuNmM2LjEsMCwxMS4xLDUsMTEuMSwxMS4xVjE1NS45eiIgLz48L3N2Zz4=", + "hero": null, + "illustration": { + "image": "data:image/svg+xml;base64,<svg width="925" height="150" viewBox="0 0 925 150" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_308_3856)">
<path fill-rule="evenodd" clip-rule="evenodd" d="M108.702 64.5817C96.8196 62.9223 84.7092 67.5985 79.0311 75.8792L76.9692 74.4654C83.2911 65.246 96.4307 60.3437 109.048 62.1058C121.566 63.8539 133.664 72.1577 138.47 89.3123C140.998 89.5553 143.487 90.7251 145.568 92.4387C147.922 94.3766 149.838 97.0763 150.76 100.137C151.686 103.216 151.602 106.658 149.939 109.997C148.282 113.324 145.112 116.436 140.064 118.988C139.889 119.076 139.696 119.122 139.5 119.122H113.5V116.622H139.199C143.739 114.276 146.369 111.557 147.702 108.882C149.054 106.167 149.127 103.386 148.366 100.858C147.599 98.3114 145.985 96.0201 143.979 94.3688C141.967 92.7119 139.645 91.7654 137.5 91.7654C136.931 91.7654 136.434 91.3813 136.291 90.8309C131.923 74.087 120.47 66.2252 108.702 64.5817Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M35.5255 49.7538C21.3143 47.8561 6.5115 51.6948 -1.348 55.0178C-1.58017 55.116 -1.75 55.353 -1.75 55.6639V122.721C-1.75 123.135 -1.41421 123.471 -0.999998 123.471H108.372C108.626 123.471 108.888 123.318 109.017 123.083C114.193 113.615 115.414 105.47 114.178 98.7817C112.942 92.0936 109.224 86.7275 104.272 82.8561C99.31 78.9771 93.1279 76.6173 87.0515 75.9712C80.9668 75.3242 75.0912 76.4039 70.681 79.2689C70.3496 79.4841 69.936 79.53 69.5655 79.3927C69.1951 79.2554 68.9113 78.951 68.8003 78.5718C65.9416 68.8045 61.1282 62.0939 55.321 57.5842C49.5024 53.0657 42.612 50.7001 35.5255 49.7538ZM70.7272 76.334C67.6866 66.9222 62.7964 60.2241 56.8544 55.6097C50.6055 50.757 43.2669 48.2654 35.8564 47.2758C21.0735 45.3018 5.80999 49.2772 -2.32155 52.7152C-3.53251 53.2271 -4.25 54.4079 -4.25 55.6639V122.721C-4.25 124.516 -2.79492 125.971 -0.999998 125.971H108.372C109.554 125.971 110.65 125.307 111.21 124.282C116.599 114.426 117.999 105.698 116.637 98.3274C115.275 90.9566 111.175 85.0789 105.812 80.8865C100.458 76.7016 93.8275 74.1776 87.3159 73.4852C81.4067 72.8568 75.5105 73.7298 70.7272 76.334Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M194.749 14.3519C194.504 14.018 194.035 13.9459 193.701 14.191L163.857 36.0917L159.794 43.011L168.022 41.6919L197.83 19.8174C198.164 19.5723 198.236 19.103 197.991 18.769L194.749 14.3519ZM192.222 12.1755C193.669 11.1136 195.703 11.4258 196.765 12.8729L200.006 17.2899C201.068 18.737 200.756 20.771 199.309 21.8329L169.014 44.0647L159.666 45.5634C157.788 45.8646 156.406 43.8431 157.37 42.2025L161.962 34.3812L192.222 12.1755Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M282.25 98.5V95.8125H279.75V98.5H282.25ZM282.25 90.4375V85.0625H279.75V90.4375H282.25ZM282.25 79.6875V77C282.25 75.8496 282.137 74.7242 281.922 73.6348L279.469 74.12C279.653 75.0505 279.75 76.0133 279.75 77V79.6875H282.25ZM279.344 67.4147C278.085 65.5341 276.466 63.9153 274.585 62.6562L273.194 64.7337C274.803 65.8109 276.189 67.1965 277.266 68.8055L279.344 67.4147ZM268.365 60.0783C267.276 59.8628 266.15 59.75 265 59.75H261.404V62.25H265C265.987 62.25 266.95 62.3467 267.88 62.5308L268.365 60.0783ZM123.138 59.75H119V62.25H123.138V59.75ZM131.413 59.75V62.25H139.147V59.75H131.413ZM153.53 59.75H146.338V62.25H153.53V59.75ZM167.913 59.75H160.722V62.25H167.913V59.75ZM182.297 59.75H175.105V62.25H182.297V59.75ZM196.68 59.75H189.488V62.25H196.68V59.75ZM211.063 59.75H203.871V62.25H211.063V59.75ZM225.446 59.75H218.255V62.25H225.446V59.75ZM239.829 59.75H232.638V62.25H239.829V59.75ZM254.213 59.75H247.021V62.25H254.213V59.75Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M163.5 132.5C166.261 132.5 168.5 130.261 168.5 127.5C168.5 124.739 166.261 122.5 163.5 122.5C160.739 122.5 158.5 124.739 158.5 127.5C158.5 130.261 160.739 132.5 163.5 132.5ZM163.5 135C167.642 135 171 131.642 171 127.5C171 123.358 167.642 120 163.5 120C159.358 120 156 123.358 156 127.5C156 131.642 159.358 135 163.5 135Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M79.5 33.5V43.5H89.5V33.5H79.5ZM79 31C77.8954 31 77 31.8954 77 33V44C77 45.1046 77.8954 46 79 46H90C91.1046 46 92 45.1046 92 44V33C92 31.8954 91.1046 31 90 31H79Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M219.982 87.837L218.406 87.2927L218.303 85.6294L217.837 80.0511L211.344 79.7558L210.105 86.7963L208.464 87.1523C206.564 87.5643 204.755 88.231 203.074 89.117L201.527 89.9321L200.231 88.7593L200.23 88.7583L195.922 85.1858L191.164 89.6158L194.421 94.1673C194.422 94.1685 194.423 94.1689 194.423 94.1689L195.596 95.5344L194.673 97.0801C193.834 98.4851 193.151 99.996 192.65 101.59L192.147 103.192L185.023 103.787L184.728 110.28L190.24 111.25L190.241 111.25L191.888 111.504L192.287 113.122C192.744 114.978 193.446 116.742 194.359 118.377L195.198 119.881L190.803 125.125L195.208 129.905L201.066 125.759L202.449 126.548C203.748 127.289 205.135 127.898 206.59 128.355L208.192 128.859L208.787 135.982L215.28 136.278L216.25 130.766L216.25 130.765L216.505 129.118L218.122 128.719C219.684 128.334 221.181 127.775 222.59 127.065L224.202 126.252L225.481 127.526L225.483 127.528L229.739 131.162L234.559 126.801L231.369 122.203C231.368 122.202 231.368 122.202 231.367 122.202L230.166 120.81L231.136 119.248C232.071 117.744 232.824 116.113 233.365 114.386L233.867 112.78L240.731 112.207L241.026 105.714L234.114 104.498L233.726 102.913C233.385 101.52 232.905 100.178 232.303 98.9026L231.511 97.2287L232.882 95.9839C232.882 95.9839 232.882 95.9841 232.882 95.9839C232.882 95.9837 232.883 95.983 232.883 95.9825L236.54 91.7454L232.204 86.9019L227.589 90.0686L227.588 90.0695L226.181 91.1322L224.708 90.1633C223.249 89.2039 221.664 88.4184 219.982 87.837ZM195.191 129.917C195.19 129.917 195.19 129.917 195.191 129.917ZM205.841 130.74L206.297 136.211C206.401 137.452 207.409 138.422 208.652 138.479L215.188 138.776C216.431 138.833 217.523 137.957 217.739 136.732L218.712 131.199C218.714 131.188 218.716 131.177 218.718 131.167C218.719 131.16 218.72 131.153 218.721 131.146C220.467 130.715 222.141 130.091 223.716 129.297C223.762 129.342 223.809 129.386 223.859 129.429L228.131 133.077C229.078 133.885 230.477 133.865 231.4 133.03L236.252 128.641C237.175 127.806 237.335 
126.416 236.625 125.393L233.422 120.778C233.371 120.704 233.317 120.634 233.26 120.568C234.304 118.887 235.147 117.064 235.751 115.132L240.96 114.697C242.2 114.593 243.171 113.585 243.227 112.342L243.525 105.806C243.581 104.563 242.706 103.471 241.48 103.255L236.154 102.318C235.772 100.76 235.236 99.2591 234.563 97.8342C234.637 97.7668 234.708 97.694 234.776 97.6159L238.446 93.3629C239.259 92.4206 239.247 91.0211 238.416 90.0936L234.053 85.2189C233.223 84.2915 231.833 84.1244 230.807 84.8287L226.175 88.0073C226.143 88.0291 226.112 88.0515 226.081 88.0745C224.452 87.0026 222.68 86.1244 220.798 85.4741C220.797 85.4563 220.796 85.4385 220.794 85.4207L220.327 79.8224C220.223 78.582 219.215 77.6112 217.972 77.5547L211.436 77.2575C210.193 77.2009 209.101 78.0762 208.885 79.302L207.934 84.7091C205.81 85.1699 203.786 85.9155 201.908 86.9054C201.881 86.881 201.854 86.8572 201.826 86.8338L197.501 83.2479C196.543 82.4535 195.144 82.4937 194.233 83.3419L189.445 87.8005C188.534 88.6488 188.395 90.0414 189.119 91.0536L192.388 95.6222C192.432 95.6835 192.478 95.7422 192.527 95.7983C191.589 97.3685 190.826 99.0578 190.265 100.841L184.794 101.297C183.554 101.401 182.583 102.409 182.527 103.652L182.229 110.188C182.173 111.431 183.048 112.523 184.274 112.739L189.807 113.712C189.824 113.715 189.842 113.718 189.86 113.721C190.371 115.796 191.156 117.768 192.175 119.595L188.873 123.535C188.074 124.489 188.107 125.888 188.95 126.803L193.383 131.615C194.227 132.53 195.619 132.677 196.635 131.958L201.211 128.72C202.663 129.548 204.213 130.229 205.841 130.74Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M212.821 118.815C218.614 119.078 223.523 114.596 223.787 108.803C224.05 103.01 219.568 98.1004 213.775 97.837C207.982 97.5736 203.072 102.056 202.809 107.849C202.545 113.642 207.028 118.551 212.821 118.815ZM212.707 121.312C219.879 121.638 225.958 116.089 226.284 108.916C226.61 101.744 221.06 95.6657 213.888 95.3396C206.716 95.0134 200.637 100.563 200.311 107.735C199.985 114.908 205.535 120.986 212.707 121.312Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M300.308 117.48C297.553 115.824 294.574 114.502 291.426 113.572C291.423 113.544 291.42 113.515 291.416 113.487L290.251 104.487C289.993 102.493 288.295 101 286.284 101H275.716C273.705 101 272.007 102.493 271.749 104.487L270.611 113.282C267.216 114.182 264.006 115.533 261.048 117.268C261.003 117.231 260.957 117.195 260.909 117.159L253.668 111.69C252.063 110.478 249.808 110.646 248.401 112.081L241.002 119.628C239.594 121.063 239.471 123.321 240.714 124.901L246.325 132.034C246.4 132.13 246.479 132.221 246.561 132.308C245.163 134.911 244.055 137.693 243.282 140.611L234.487 141.749C232.493 142.007 231 143.705 231 145.716V156.284C231 158.295 232.493 159.993 234.487 160.251L243.487 161.416C243.515 161.42 243.544 161.423 243.572 161.426C244.501 164.57 245.82 167.546 247.474 170.298C247.38 170.399 247.289 170.505 247.204 170.617L241.696 177.83C240.476 179.428 240.632 181.684 242.06 183.099L249.567 190.538C250.996 191.953 253.253 192.088 254.839 190.853L262.001 185.28C262.253 185.084 262.474 184.865 262.665 184.629C265.164 185.938 267.826 186.98 270.611 187.718L271.749 196.513C272.007 198.507 273.705 200 275.716 200H286.284C288.295 200 289.993 198.507 290.251 196.513L291.416 187.513C291.418 187.499 291.42 187.485 291.421 187.47C291.423 187.456 291.425 187.442 291.426 187.428C294.212 186.604 296.867 185.475 299.351 184.077C299.428 184.147 299.508 184.215 299.592 184.28L306.754 189.853C308.34 191.088 310.597 190.953 312.026 189.538L319.533 182.099C320.961 180.684 321.117 178.428 319.897 176.83L314.389 169.617C314.301 169.502 314.208 169.393 314.111 169.29C315.674 166.501 316.899 163.498 317.732 160.335L326.107 159.251C328.101 158.993 329.593 157.295 329.593 155.284V144.716C329.593 142.705 328.101 141.007 326.107 140.749L317.443 139.628C316.731 137.203 315.786 134.877 314.635 132.677C314.864 132.491 315.077 132.277 315.268 132.034L320.879 124.901C322.122 123.321 321.999 121.063 320.591 119.628L313.192 112.081C311.785 110.646 
309.53 110.478 307.925 111.69L300.684 117.159C300.55 117.26 300.425 117.367 300.308 117.48ZM288.937 187.193C288.938 187.181 288.939 187.17 288.941 187.159L289.12 185.502L290.718 185.03C293.322 184.261 295.803 183.205 298.125 181.898L299.699 181.013L301.034 182.228C301.063 182.255 301.094 182.281 301.127 182.307L308.289 187.88C308.884 188.343 309.73 188.293 310.266 187.762L317.773 180.323C318.309 179.792 318.367 178.947 317.91 178.347L312.402 171.134C312.368 171.089 312.332 171.047 312.295 171.008L311.031 169.673L311.93 168.068C313.391 165.461 314.536 162.653 315.315 159.698L315.743 158.071L325.786 156.772C326.533 156.675 327.093 156.038 327.093 155.284V144.716C327.093 143.962 326.533 143.325 325.786 143.228L315.505 141.898L315.045 140.332C314.379 138.066 313.496 135.893 312.42 133.836L311.47 132.02L313.063 130.732C313.148 130.664 313.229 130.583 313.303 130.488L318.914 123.355C319.38 122.763 319.334 121.916 318.806 121.378L311.407 113.832C310.879 113.293 310.034 113.23 309.432 113.685L302.19 119.154C302.138 119.194 302.09 119.235 302.046 119.277L300.666 120.612L299.02 119.623C296.444 118.074 293.659 116.839 290.718 115.97L289.12 115.498L288.941 113.841C288.939 113.83 288.938 113.819 288.937 113.807C288.937 113.807 288.937 113.808 288.937 113.807L287.772 104.807C287.675 104.06 287.038 103.5 286.284 103.5H275.716C274.962 103.5 274.325 104.06 274.228 104.807L272.874 115.268L271.251 115.699C268.079 116.539 265.079 117.802 262.313 119.425L260.805 120.309L259.457 119.197C259.439 119.182 259.421 119.168 259.403 119.154L252.161 113.685C251.559 113.23 250.714 113.293 250.186 113.832L242.787 121.378C242.259 121.916 242.213 122.763 242.679 123.355L248.29 130.488C248.319 130.526 248.35 130.561 248.381 130.594L249.615 131.905L248.763 133.491C247.456 135.924 246.421 138.524 245.699 141.251L245.268 142.874L234.807 144.228C234.06 144.325 233.5 144.962 233.5 145.716V156.284C233.5 157.038 234.06 157.675 234.807 157.772L243.807 158.937C243.807 158.937 243.807 158.937 243.807 
158.937C243.819 158.938 243.83 158.939 243.841 158.941L245.498 159.12L245.97 160.718C246.838 163.656 248.071 166.437 249.617 169.011L250.592 170.633L249.295 172.011C249.259 172.049 249.224 172.09 249.191 172.134L243.683 179.347C243.226 179.947 243.284 180.792 243.82 181.323L251.327 188.762C251.863 189.292 252.709 189.343 253.304 188.88L260.466 183.307C260.564 183.23 260.648 183.147 260.718 183.06L262.006 181.461L263.824 182.414C266.161 183.637 268.648 184.612 271.251 185.301L272.874 185.732L274.228 196.193C274.325 196.94 274.962 197.5 275.716 197.5H286.284C287.038 197.5 287.675 196.94 287.772 196.193L288.937 187.193C288.937 187.192 288.937 187.193 288.937 187.193Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M281 169.5C291.217 169.5 299.5 161.217 299.5 151C299.5 140.783 291.217 132.5 281 132.5C270.783 132.5 262.5 140.783 262.5 151C262.5 161.217 270.783 169.5 281 169.5ZM281 172C292.598 172 302 162.598 302 151C302 139.402 292.598 130 281 130C269.402 130 260 139.402 260 151C260 162.598 269.402 172 281 172Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M264.75 3.875V0H267.25V3.875H264.75ZM264.75 19.375V11.625H267.25V19.375H264.75ZM264.75 31V27.125H267.25V31C267.25 31.9867 267.347 32.9495 267.531 33.88L265.078 34.3652C264.863 33.2758 264.75 32.1504 264.75 31ZM272.415 45.3438C270.534 44.0847 268.915 42.4658 267.656 40.5853L269.734 39.1945C270.811 40.8035 272.197 42.1891 273.806 43.2663L272.415 45.3438ZM282 48.25C280.85 48.25 279.724 48.1372 278.635 47.9217L279.12 45.4692C280.05 45.6533 281.013 45.75 282 45.75H285.304V48.25H282ZM298.518 48.25H291.911V45.75H298.518V48.25ZM311.732 48.25H305.125V45.75H311.732V48.25ZM324.946 48.25H318.339V45.75H324.946V48.25ZM338.161 48.25H331.554V45.75H338.161V48.25ZM351.375 48.25H344.768V45.75H351.375V48.25ZM364.589 48.25H357.982V45.75H364.589V48.25ZM374.5 48.25H371.196V45.75H374.5V48.25Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M303.75 65C303.75 64.3096 304.31 63.75 305 63.75H328.5C329.19 63.75 329.75 64.3096 329.75 65C329.75 65.6904 329.19 66.25 328.5 66.25H305C304.31 66.25 303.75 65.6904 303.75 65Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M303.75 75C303.75 74.3096 304.31 73.75 305 73.75H351C351.69 73.75 352.25 74.3096 352.25 75C352.25 75.6904 351.69 76.25 351 76.25H305C304.31 76.25 303.75 75.6904 303.75 75Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M344.423 125.241C343.547 126.684 341.453 126.684 340.577 125.241L333.854 114.168C332.943 112.668 334.023 110.75 335.777 110.75L349.223 110.75C350.977 110.75 352.057 112.668 351.146 114.168L344.423 125.241ZM342.5 123.591L348.779 113.25L336.221 113.25L342.5 123.591Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M437.629 108H437.082L436.711 108.401L424.452 121.641C421.243 125.106 423.701 130.732 428.424 130.732H448.036H455.321H474.933C479.656 130.732 482.113 125.106 478.905 121.641L466.645 108.401L466.274 108H465.728H463.811H462.059H437.629ZM462.059 110.5H438.175L426.286 123.34C424.56 125.204 425.882 128.232 428.424 128.232H448.036H455.321H474.933C477.475 128.232 478.797 125.204 477.07 123.34L465.182 110.5H463.811H462.059Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M523.233 28.5H381.671C380.472 28.5 379.5 29.4721 379.5 30.6712V105.627C379.5 106.826 380.472 107.798 381.671 107.798H523.233C524.432 107.798 525.404 106.826 525.404 105.627V30.6712C525.404 29.4721 524.432 28.5 523.233 28.5ZM381.671 26C379.091 26 377 28.0914 377 30.6712V105.627C377 108.207 379.091 110.298 381.671 110.298H523.233C525.813 110.298 527.904 108.207 527.904 105.627V30.6712C527.904 28.0914 525.813 26 523.233 26H381.671Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M423.626 48.08H432.42L440.865 78H433.223L431.263 70.92H424.509L422.589 78H415.224L423.626 48.08ZM425.141 50.08L417.863 76H421.059L422.979 68.92H432.784L434.744 76H438.222L430.907 50.08H425.141ZM443.208 48.08H454.808C457.721 48.08 460.1 48.9143 461.78 50.7185C463.434 52.4952 464.208 54.8694 464.208 57.72C464.208 60.5705 463.434 62.9447 461.78 64.7214C460.1 66.5257 457.721 67.36 454.808 67.36H450.448V78H443.208V48.08ZM445.208 50.08V76H448.448V65.36H454.808C457.335 65.36 459.116 64.6476 460.316 63.3585C461.542 62.0419 462.208 60.2027 462.208 57.72C462.208 55.2372 461.542 53.398 460.316 52.0814C459.116 50.7923 457.335 50.08 454.808 50.08H445.208ZM468.112 48.08H487.872V54H481.592V72.08H487.872V78H468.112V72.08H474.392V54H468.112V48.08ZM470.112 50.08V52H476.392V74.08H470.112V76H485.872V74.08H479.592V52H485.872V50.08H470.112ZM448.448 52.36H453.728C455.233 52.36 456.561 52.6641 457.451 53.5094C458.351 54.3375 458.688 55.5731 458.688 56.96V58.48C458.688 59.8759 458.347 61.113 457.459 61.9626L457.451 61.9706L457.442 61.9784C456.553 62.7908 455.229 63.08 453.728 63.08H448.448V52.36ZM450.448 54.36V61.08H453.728C455.043 61.08 455.745 60.8136 456.085 60.5091C456.42 60.1834 456.688 59.5813 456.688 58.48V56.96C456.688 55.8446 456.413 55.2703 456.094 54.9784L456.085 54.9706L456.077 54.9626C455.74 54.6406 455.045 54.36 453.728 54.36H450.448ZM426.978 53.24H428.829L432.45 66.64H423.358L426.978 53.24ZM427.904 57.4818L425.97 64.64H429.838L427.904 57.4818Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M526.478 93.25H379.335V90.75H526.478V93.25Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M553.5 89.5V99.5H563.5V89.5H553.5ZM553 87C551.895 87 551 87.8954 551 89V100C551 101.105 551.895 102 553 102H564C565.105 102 566 101.105 566 100V89C566 87.8954 565.105 87 564 87H553Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M661.712 46.5V48.2303H659.212V46.5H661.712ZM661.712 51.6908V53.4211C661.712 54.5715 661.599 55.6968 661.383 56.7862L658.931 56.3011C659.115 55.3706 659.212 54.4077 659.212 53.4211V51.6908H661.712ZM658.805 63.0063C657.546 64.8869 655.928 66.5058 654.047 67.7648L652.656 65.6874C654.265 64.6102 655.651 63.2245 656.728 61.6155L658.805 63.0063ZM647.827 70.3427C646.738 70.5583 645.612 70.6711 644.462 70.6711H640.912V68.1711H644.462C645.448 68.1711 646.411 68.0743 647.342 67.8903L647.827 70.3427ZM534.432 70.6711H530.883V68.1711H534.432V70.6711ZM548.63 70.6711H541.531V68.1711H548.63V70.6711ZM562.827 70.6711H555.728V68.1711H562.827V70.6711ZM577.024 70.6711H569.926V68.1711H577.024V70.6711ZM591.222 70.6711H584.123V68.1711H591.222V70.6711ZM605.419 70.6711H598.32V68.1711H605.419V70.6711ZM619.616 70.6711H612.518V68.1711H619.616V70.6711ZM633.814 70.6711H626.715V68.1711H633.814V70.6711Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M591.577 28.7592C592.453 27.3161 594.547 27.3161 595.423 28.7592L602.146 39.8323C603.057 41.3318 601.977 43.25 600.223 43.25H586.777C585.023 43.25 583.943 41.3318 584.854 39.8323L591.577 28.7592ZM593.5 30.4086L587.221 40.75H599.779L593.5 30.4086Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M604.108 91.2C603.773 91.2 603.522 91.5062 603.587 91.8347L610.249 125.438C610.299 125.686 610.514 125.863 610.764 125.865C618.495 125.929 629.429 126.001 638.391 126.019C642.873 126.029 646.858 126.025 649.702 126C651.126 125.987 652.255 125.969 653.016 125.946C653.037 125.945 653.058 125.944 653.078 125.944C653.155 125.382 653.277 124.61 653.435 123.682C653.76 121.761 654.243 119.12 654.801 116.16C655.917 110.236 657.337 103.006 658.409 97.6262C658.475 97.2966 658.223 96.9896 657.887 96.9896H626.834C625.572 96.9896 624.453 96.183 624.053 94.9868L622.91 91.5631C622.838 91.3463 622.635 91.2 622.406 91.2H604.108ZM601.233 92.3015C600.873 90.489 602.26 88.8 604.108 88.8H622.406C623.667 88.8 624.787 89.6067 625.186 90.8029L626.33 94.2266C626.402 94.4434 626.605 94.5896 626.834 94.5896H657.887C659.737 94.5896 661.124 96.2815 660.763 98.0953C659.692 103.471 658.274 110.692 657.16 116.604C656.603 119.561 656.123 122.184 655.801 124.083C655.639 125.035 655.52 125.793 655.449 126.317C655.415 126.573 655.395 126.754 655.387 126.867C655.429 127.128 655.382 127.381 655.278 127.591C655.094 127.964 654.78 128.116 654.724 128.143L654.72 128.145C654.559 128.224 654.407 128.252 654.37 128.259L654.367 128.26C654.254 128.281 654.127 128.293 654.025 128.301C653.801 128.318 653.481 128.332 653.091 128.345C652.302 128.369 651.15 128.387 649.723 128.399C646.866 128.425 642.87 128.429 638.386 128.419C629.417 128.401 618.478 128.329 610.745 128.265C609.354 128.254 608.165 127.266 607.895 125.904L601.233 92.3015Z" fill="#14648e"/>
<path d="M618.593 112.323V110.172L625.294 106.589V108.739L620.666 111.185V111.31L625.294 113.756V115.907L618.593 112.323Z" fill="#14648e"/>
<path d="M627.749 118.15L632.751 104.158H634.824L629.822 118.15H627.749Z" fill="#14648e"/>
<path d="M637.28 113.756L641.907 111.31V111.185L637.28 108.739V106.589L643.98 110.172V112.323L637.28 115.907V113.756Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M618.493 112.383V110.112L625.393 106.422V108.799L620.766 111.245V111.25L625.393 113.696V116.073L618.493 112.383ZM620.666 111.185L625.293 108.739V106.588L618.593 110.172V112.323L625.293 115.907V113.756L620.666 111.31V111.185ZM627.607 118.25L632.681 104.058H634.966L629.892 118.25H627.607ZM627.749 118.15H629.822L634.824 104.158H632.751L627.749 118.15ZM637.18 113.696L641.807 111.25V111.245L637.18 108.799V106.422L644.08 110.112V112.383L637.18 116.073V113.696ZM641.907 111.31L637.28 113.756V115.907L643.98 112.323V110.172L637.28 106.588V108.739L641.907 111.185V111.31Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M676.065 -23.4312C673.797 -24.795 671.343 -25.8829 668.752 -26.6485C668.749 -26.672 668.746 -26.6956 668.743 -26.7191L667.784 -34.1293C667.572 -35.7711 666.174 -37 664.518 -37H655.817C654.161 -37 652.763 -35.7711 652.551 -34.1293L651.613 -26.8877C648.819 -26.147 646.175 -25.0342 643.74 -23.6054C643.703 -23.6361 643.665 -23.6661 643.626 -23.6954L637.663 -28.1986C636.342 -29.1963 634.486 -29.0583 633.327 -27.8762L627.235 -21.6629C626.076 -20.4809 625.974 -18.6221 626.998 -17.321L631.618 -11.4483C631.68 -11.3695 631.745 -11.2943 631.812 -11.2227C630.661 -9.07978 629.749 -6.78915 629.112 -4.38659L621.871 -3.44944C620.229 -3.23697 619 -1.83877 619 -0.183277V8.51837C619 10.1739 620.229 11.5721 621.871 11.7845L629.281 12.7435C629.304 12.7465 629.328 12.7493 629.352 12.7519C630.051 15.1202 631.02 17.373 632.223 19.4748L627.807 25.2581C626.802 26.5739 626.93 28.4309 628.106 29.5962L634.288 35.7208C635.464 36.8861 637.322 36.9972 638.628 35.9805L644.397 31.4918C646.655 32.7452 649.083 33.7308 651.637 34.4055L652.551 41.4643C652.763 43.1061 654.161 44.335 655.817 44.335H664.518C666.174 44.335 667.572 43.1061 667.785 41.4643L668.729 34.1669C671.078 33.475 673.313 32.5184 675.4 31.3317C675.425 31.352 675.45 31.372 675.475 31.3917L681.372 35.9805C682.678 36.9972 684.536 36.8861 685.712 35.7208L691.894 29.5962C693.07 28.4309 693.198 26.5739 692.193 25.2581L687.659 19.3193C687.605 19.2489 687.549 19.1813 687.491 19.1166C688.612 17.0973 689.518 14.9429 690.18 12.6838L697.129 11.7845C698.771 11.572 700 10.1738 700 8.51832V-0.183323C700 -1.83882 698.771 -3.23702 697.129 -3.44949L690.417 -4.31808C689.81 -6.63241 688.947 -8.84337 687.861 -10.9191C688.05 -11.0718 688.225 -11.2483 688.382 -11.4483L693.002 -17.321C694.026 -18.6221 693.924 -20.4809 692.765 -21.6629L686.673 -27.8762C685.514 -29.0583 683.658 -29.1963 682.337 -28.1986L676.374 -23.6954C676.264 -23.6125 676.161 -23.5242 676.065 -23.4312ZM685.672 20.8365C685.658 20.818 685.643 
20.8007 685.629 20.7846L684.442 19.4592L685.305 17.9034C686.337 16.0437 687.172 14.0602 687.781 11.9809L688.24 10.414L696.808 9.30516C697.204 9.25397 697.5 8.91714 697.5 8.51832V-0.183323C697.5 -0.582142 697.204 -0.918976 696.808 -0.970161L688.427 -2.05482L687.999 -3.68309C687.44 -5.8133 686.646 -7.84867 685.646 -9.76011L684.696 -11.5756L686.289 -12.8635C686.333 -12.8991 686.376 -12.9419 686.417 -12.994L691.037 -18.8667C691.284 -19.1801 691.259 -19.6279 690.98 -19.9127L684.888 -26.126C684.609 -26.4107 684.162 -26.444 683.843 -26.2036L677.881 -21.7004C677.852 -21.6787 677.826 -21.6564 677.803 -21.6339L676.423 -20.2994L674.777 -21.2886C672.688 -22.5448 670.429 -23.5463 668.044 -24.251L666.445 -24.7231L666.266 -26.3798L666.264 -26.3983L665.305 -33.8084C665.254 -34.2039 664.917 -34.5 664.518 -34.5H655.817C655.418 -34.5 655.081 -34.2039 655.03 -33.8084L653.877 -24.9013L652.254 -24.4711C649.682 -23.7894 647.248 -22.765 645.005 -21.4491L643.497 -20.5646L642.149 -21.6772C642.139 -21.685 642.129 -21.6927 642.119 -21.7004L636.157 -26.2036C635.838 -26.444 635.391 -26.4107 635.112 -26.126L629.02 -19.9127C628.741 -19.6279 628.716 -19.1801 628.963 -18.8667L633.583 -12.994C633.599 -12.9733 633.615 -12.9542 633.632 -12.9366L634.866 -11.6258L634.014 -10.0397C632.954 -8.06631 632.115 -5.95756 631.529 -3.74615L631.099 -2.1228L622.192 -0.970116C621.796 -0.91893 621.5 -0.582096 621.5 -0.183277V8.51837C621.5 8.91719 621.796 9.25402 622.192 9.30521L629.602 10.2642L629.62 10.2663L631.277 10.4454L631.749 12.0436C632.393 14.2234 633.285 16.2975 634.392 18.2333L635.216 19.6734L629.794 26.7752C629.552 27.0922 629.583 27.5396 629.866 27.8203L636.047 33.945C636.33 34.2257 636.778 34.2525 637.093 34.0075L644.165 28.5041L645.61 29.3058C647.69 30.4602 649.925 31.3674 652.276 31.9884L653.901 32.4178L655.03 41.1434C655.081 41.539 655.418 41.835 655.817 41.835H664.518C664.917 41.835 665.254 41.5389 665.305 41.1434L666.459 32.2294L668.022 31.7688C670.184 31.132 672.242 30.2514 674.164 29.1584L675.662 
28.307L676.991 29.4031C676.997 29.4082 677.003 29.4135 677.01 29.4187L682.907 34.0075C683.222 34.2525 683.67 34.2257 683.953 33.945L690.134 27.8203C690.417 27.5396 690.448 27.0922 690.206 26.7752L685.672 20.8365Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M660.167 19.9578C668.336 19.9578 674.958 13.336 674.958 5.16749C674.958 -3.001 668.336 -9.62286 660.167 -9.62286C651.999 -9.62286 645.377 -3.001 645.377 5.16749C645.377 13.336 651.999 19.9578 660.167 19.9578ZM660.167 22.4578C669.717 22.4578 677.458 14.7167 677.458 5.16749C677.458 -4.38171 669.717 -12.1229 660.167 -12.1229C650.618 -12.1229 642.877 -4.38171 642.877 5.16749C642.877 14.7167 650.618 22.4578 660.167 22.4578Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M694.359 15.75H699.193V18.25H694.359V15.75ZM713.526 18.25H706.359V15.75H713.526V18.25ZM724.276 18.25H720.692V15.75H724.276C725.426 15.75 726.551 15.8628 727.641 16.0783L727.156 18.5308C726.225 18.3467 725.262 18.25 724.276 18.25ZM736.542 24.8056C735.465 23.1965 734.079 21.8109 732.47 20.7337L733.861 18.6562C735.741 19.9153 737.36 21.5342 738.619 23.4148L736.542 24.8056ZM739.026 33C739.026 32.0133 738.929 31.0505 738.745 30.12L741.197 29.6348C741.413 30.7242 741.526 31.8496 741.526 33V37.0439H739.026V33ZM739.026 53.2193V45.1316H741.526V53.2193H739.026ZM739.026 69.3948V61.307H741.526V69.3948H739.026ZM739.026 81.5263V77.4825H741.526V81.5263C741.526 82.513 741.622 83.4759 741.806 84.4063L739.354 84.8915C739.138 83.8021 739.026 82.6768 739.026 81.5263ZM746.69 95.8701C744.81 94.6111 743.191 92.9922 741.932 91.1116L744.009 89.7208C745.086 91.3298 746.472 92.7154 748.081 93.7927L746.69 95.8701ZM756.276 98.7763C755.125 98.7763 754 98.6635 752.91 98.448L753.396 95.9955C754.326 96.1796 755.289 96.2763 756.276 96.2763H759.116V98.7763H756.276ZM770.478 98.7763H764.797V96.2763H770.478V98.7763ZM776.159 98.7763V96.2763H780.25V98.7763H776.159Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M708 119.5C709.933 119.5 711.5 117.933 711.5 116C711.5 114.067 709.933 112.5 708 112.5C706.067 112.5 704.5 114.067 704.5 116C704.5 117.933 706.067 119.5 708 119.5ZM708 122C711.314 122 714 119.314 714 116C714 112.686 711.314 110 708 110C704.686 110 702 112.686 702 116C702 119.314 704.686 122 708 122Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M682 139.5C683.933 139.5 685.5 137.933 685.5 136C685.5 134.067 683.933 132.5 682 132.5C680.067 132.5 678.5 134.067 678.5 136C678.5 137.933 680.067 139.5 682 139.5ZM682 142C685.314 142 688 139.314 688 136C688 132.686 685.314 130 682 130C678.686 130 676 132.686 676 136C676 139.314 678.686 142 682 142Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M734.647 139.5C732.714 139.5 731.147 137.933 731.147 136C731.147 134.067 732.714 132.5 734.647 132.5C736.58 132.5 738.147 134.067 738.147 136C738.147 137.933 736.58 139.5 734.647 139.5ZM734.647 142C731.334 142 728.647 139.314 728.647 136C728.647 132.686 731.334 130 734.647 130C737.961 130 740.647 132.686 740.647 136C740.647 139.314 737.961 142 734.647 142Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M704.287 118.21C704.724 118.744 704.644 119.532 704.109 119.968L687.119 133.837C686.584 134.273 685.797 134.193 685.36 133.659C684.924 133.124 685.003 132.336 685.538 131.9L702.529 118.032C703.063 117.595 703.851 117.675 704.287 118.21Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M712.36 118.21C711.924 118.744 712.003 119.532 712.538 119.968L729.528 133.837C730.063 134.273 730.851 134.193 731.287 133.659C731.724 133.124 731.644 132.336 731.109 131.9L714.119 118.032C713.584 117.595 712.797 117.675 712.36 118.21Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M787 40.25C785.481 40.25 784.25 41.4812 784.25 43V116C784.25 117.519 785.481 118.75 787 118.75H830V121.25H787C784.101 121.25 781.75 118.899 781.75 116V43C781.75 40.1005 784.101 37.75 787 37.75H796V40.25H787Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M794.75 34C794.75 31.1005 797.101 28.75 800 28.75H843C843.348 28.75 843.681 28.8954 843.918 29.1512L862.418 49.1512C862.631 49.3822 862.75 49.6853 862.75 50V80.5C862.75 81.1904 862.19 81.75 861.5 81.75C860.81 81.75 860.25 81.1904 860.25 80.5V50.4895L842.453 31.25H800C798.481 31.25 797.25 32.4812 797.25 34V104C797.25 105.519 798.481 106.75 800 106.75H828.5C829.19 106.75 829.75 107.31 829.75 108C829.75 108.69 829.19 109.25 828.5 109.25H800C797.101 109.25 794.75 106.899 794.75 104V34Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M807.75 59.5C807.75 58.8096 808.31 58.25 809 58.25H839C839.69 58.25 840.25 58.8096 840.25 59.5C840.25 60.1904 839.69 60.75 839 60.75H809C808.31 60.75 807.75 60.1904 807.75 59.5Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M814.75 70.5C814.75 69.8096 815.31 69.25 816 69.25H846C846.69 69.25 847.25 69.8096 847.25 70.5C847.25 71.1904 846.69 71.75 846 71.75H816C815.31 71.75 814.75 71.1904 814.75 70.5Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M807.75 81.5C807.75 80.8096 808.31 80.25 809 80.25H839C839.69 80.25 840.25 80.8096 840.25 81.5C840.25 82.1904 839.69 82.75 839 82.75H809C808.31 82.75 807.75 82.1904 807.75 81.5Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M842.5 28.75C843.19 28.75 843.75 29.3096 843.75 30V48.75H861C861.69 48.75 862.25 49.3096 862.25 50C862.25 50.6904 861.69 51.25 861 51.25H842.5C841.81 51.25 841.25 50.6904 841.25 50V30C841.25 29.3096 841.81 28.75 842.5 28.75Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M880.5 30.5C883.261 30.5 885.5 28.2614 885.5 25.5C885.5 22.7386 883.261 20.5 880.5 20.5C877.739 20.5 875.5 22.7386 875.5 25.5C875.5 28.2614 877.739 30.5 880.5 30.5ZM880.5 33C884.642 33 888 29.6421 888 25.5C888 21.3579 884.642 18 880.5 18C876.358 18 873 21.3579 873 25.5C873 29.6421 876.358 33 880.5 33Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M893.996 48.6412C908.055 45.8749 922.531 50.0153 931.571 54.6546C931.988 54.8686 932.25 55.298 932.25 55.7667V134C932.25 134.69 931.691 135.25 931 135.25H837.136C836.733 135.25 836.355 135.056 836.12 134.728C830.097 126.318 827.55 118.575 827.454 111.726C827.359 104.872 829.721 99.0164 833.332 94.4009C836.933 89.7967 841.778 86.4222 846.691 84.4774C851.118 82.7253 855.705 82.0948 859.554 82.8752C866.706 60.8954 880.046 51.3859 893.996 48.6412ZM894.478 51.0942C881.265 53.6939 868.394 62.7654 861.585 84.7381C861.383 85.3905 860.694 85.7599 860.039 85.5676C856.665 84.5777 852.169 84.9982 847.612 86.802C843.075 88.5975 838.606 91.7164 835.301 95.9412C832.004 100.155 829.867 105.464 829.954 111.691C830.039 117.79 832.259 124.869 837.783 132.75H929.75V56.5365C921.001 52.2078 907.476 48.5368 894.478 51.0942Z" fill="#14648e"/>
<path d="M891.253 117.8C890.853 117.8 890.467 117.727 890.093 117.58C889.72 117.433 889.387 117.22 889.093 116.94C888.813 116.673 888.587 116.34 888.413 115.94C888.24 115.553 888.153 115.113 888.153 114.62V111.18C888.153 110.807 888.067 110.493 887.893 110.24C887.72 109.973 887.46 109.84 887.113 109.84V108.4C887.46 108.4 887.72 108.273 887.893 108.02C888.067 107.753 888.153 107.433 888.153 107.06V103.64C888.153 103.147 888.24 102.707 888.413 102.32C888.587 101.92 888.813 101.58 889.093 101.3C889.387 101.02 889.72 100.807 890.093 100.66C890.467 100.513 890.853 100.44 891.253 100.44H892.453V102.14H891.253C890.907 102.14 890.607 102.273 890.353 102.54C890.113 102.807 889.993 103.167 889.993 103.62V107.32C889.993 107.747 889.92 108.12 889.773 108.44C889.64 108.747 889.407 108.973 889.073 109.12C889.407 109.267 889.64 109.5 889.773 109.82C889.92 110.127 889.993 110.493 889.993 110.92V114.6C889.993 115.053 890.113 115.413 890.353 115.68C890.607 115.96 890.907 116.1 891.253 116.1H892.453V117.8H891.253Z" fill="#14648e"/>
<path d="M895.355 114.22C894.915 114.22 894.535 114.06 894.215 113.74C893.895 113.42 893.735 113.04 893.735 112.6C893.735 112.16 893.895 111.78 894.215 111.46C894.535 111.14 894.915 110.98 895.355 110.98C895.795 110.98 896.175 111.14 896.495 111.46C896.815 111.78 896.975 112.16 896.975 112.6C896.975 113.04 896.815 113.42 896.495 113.74C896.175 114.06 895.795 114.22 895.355 114.22Z" fill="#14648e"/>
<path d="M900.492 114.22C900.052 114.22 899.672 114.06 899.352 113.74C899.032 113.42 898.872 113.04 898.872 112.6C898.872 112.16 899.032 111.78 899.352 111.46C899.672 111.14 900.052 110.98 900.492 110.98C900.932 110.98 901.312 111.14 901.632 111.46C901.952 111.78 902.112 112.16 902.112 112.6C902.112 113.04 901.952 113.42 901.632 113.74C901.312 114.06 900.932 114.22 900.492 114.22Z" fill="#14648e"/>
<path d="M905.628 114.22C905.188 114.22 904.808 114.06 904.488 113.74C904.168 113.42 904.008 113.04 904.008 112.6C904.008 112.16 904.168 111.78 904.488 111.46C904.808 111.14 905.188 110.98 905.628 110.98C906.068 110.98 906.448 111.14 906.768 111.46C907.088 111.78 907.248 112.16 907.248 112.6C907.248 113.04 907.088 113.42 906.768 113.74C906.448 114.06 906.068 114.22 905.628 114.22Z" fill="#14648e"/>
<path d="M908.545 116.1H909.765C910.112 116.1 910.405 115.96 910.645 115.68C910.898 115.413 911.025 115.053 911.025 114.6V110.92C911.025 110.493 911.092 110.127 911.225 109.82C911.372 109.5 911.612 109.267 911.945 109.12C911.612 108.973 911.372 108.747 911.225 108.44C911.092 108.12 911.025 107.747 911.025 107.32V103.62C911.025 103.167 910.898 102.807 910.645 102.54C910.405 102.273 910.112 102.14 909.765 102.14H908.545V100.44H909.765C910.165 100.44 910.552 100.513 910.925 100.66C911.298 100.807 911.625 101.02 911.905 101.3C912.198 101.58 912.432 101.92 912.605 102.32C912.778 102.707 912.865 103.147 912.865 103.64V107.06C912.865 107.433 912.952 107.753 913.125 108.02C913.298 108.273 913.558 108.4 913.905 108.4V109.84C913.558 109.84 913.298 109.973 913.125 110.24C912.952 110.493 912.865 110.807 912.865 111.18V114.62C912.865 115.113 912.778 115.553 912.605 115.94C912.432 116.34 912.198 116.673 911.905 116.94C911.625 117.22 911.298 117.433 910.925 117.58C910.552 117.727 910.165 117.8 909.765 117.8H908.545V116.1Z" fill="#14648e"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M891.253 100.59C890.872 100.59 890.504 100.66 890.148 100.8C889.794 100.939 889.477 101.141 889.198 101.407C888.933 101.672 888.717 101.996 888.551 102.38L888.55 102.381L888.55 102.381C888.387 102.746 888.303 103.164 888.303 103.64V107.06C888.303 107.457 888.211 107.807 888.019 108.102L888.017 108.105L888.017 108.105C887.839 108.365 887.582 108.509 887.263 108.542V109.698C887.584 109.733 887.841 109.885 888.018 110.157C888.211 110.44 888.303 110.784 888.303 111.18V114.62C888.303 115.096 888.387 115.514 888.55 115.879L888.551 115.88C888.717 116.264 888.933 116.58 889.197 116.831L889.197 116.832C889.476 117.098 889.793 117.301 890.148 117.44C890.504 117.58 890.872 117.65 891.253 117.65H892.303V116.25H891.253C890.86 116.25 890.521 116.089 890.242 115.781L890.242 115.78C889.971 115.479 889.843 115.08 889.843 114.6V110.92C889.843 110.51 889.773 110.167 889.638 109.885L889.635 109.878L889.635 109.878C889.517 109.595 889.313 109.389 889.013 109.257C888.958 109.233 888.923 109.18 888.923 109.12C888.923 109.061 888.958 109.007 889.013 108.983C889.315 108.85 889.519 108.649 889.636 108.38L889.637 108.378L889.637 108.378C889.773 108.081 889.843 107.73 889.843 107.32V103.62C889.843 103.14 889.971 102.741 890.242 102.44L890.244 102.437L890.244 102.437C890.524 102.142 890.863 101.99 891.253 101.99H892.303V100.59H891.253ZM890.038 100.52C890.429 100.367 890.834 100.29 891.253 100.29H892.453C892.536 100.29 892.603 100.357 892.603 100.44V102.14C892.603 102.223 892.536 102.29 892.453 102.29H891.253C890.951 102.29 890.69 102.404 890.463 102.642C890.255 102.874 890.143 103.195 890.143 103.62V107.32C890.143 107.763 890.067 108.158 889.91 108.501C889.798 108.759 889.621 108.966 889.386 109.121C889.622 109.278 889.798 109.491 889.91 109.759C890.068 110.089 890.143 110.478 890.143 110.92V114.6C890.143 115.026 890.255 115.347 890.465 115.58C890.692 115.831 890.953 115.95 891.253 115.95H892.453C892.536 115.95 892.603 116.017 892.603 
116.1V117.8C892.603 117.883 892.536 117.95 892.453 117.95H891.253C890.834 117.95 890.429 117.873 890.038 117.72C889.647 117.566 889.297 117.342 888.99 117.049L889.093 116.94L888.99 117.049C888.99 117.049 888.99 117.049 888.99 117.049C888.694 116.767 888.456 116.416 888.276 116.001C888.276 116.001 888.276 116.001 888.276 116.001L888.413 115.94L888.276 116C888.276 116 888.276 116 888.276 116.001M887.768 107.937C887.923 107.699 888.003 107.409 888.003 107.06V103.64C888.003 103.129 888.093 102.668 888.276 102.26C888.456 101.844 888.693 101.488 888.987 101.194L888.99 101.192L888.99 101.192C889.297 100.898 889.647 100.674 890.038 100.52M908.395 100.44C908.395 100.357 908.462 100.29 908.545 100.29H909.765C910.184 100.29 910.589 100.367 910.98 100.52C911.372 100.674 911.715 100.899 912.01 101.193C912.318 101.487 912.562 101.843 912.742 102.26C912.742 102.26 912.743 102.26 912.743 102.26L912.605 102.32L912.742 102.259C912.742 102.259 912.742 102.259 912.742 102.26C912.925 102.668 913.015 103.129 913.015 103.64V107.06C913.015 107.409 913.096 107.699 913.25 107.937C913.25 107.936 913.249 107.936 913.249 107.935L913.125 108.02L913.251 107.938C913.25 107.938 913.25 107.937 913.25 107.937C913.391 108.143 913.601 108.25 913.905 108.25C913.988 108.25 914.055 108.317 914.055 108.4V109.84C914.055 109.923 913.988 109.99 913.905 109.99C913.604 109.99 913.394 110.102 913.251 110.322L913.249 110.325L913.249 110.325C913.096 110.548 913.015 110.83 913.015 111.18V114.62C913.015 115.131 912.925 115.592 912.742 116C912.742 116 912.743 116 912.743 116L912.605 115.94L912.742 116.001C912.742 116.001 912.742 116.001 912.742 116C912.562 116.417 912.318 116.767 912.009 117.049C912.009 117.048 912.01 117.047 912.011 117.046L911.905 116.94L912.006 117.051C912.007 117.05 912.008 117.049 912.009 117.049C911.714 117.342 911.371 117.566 910.98 117.72C910.589 117.873 910.184 117.95 909.765 117.95H908.545C908.462 117.95 908.395 117.883 908.395 117.8V116.1C908.395 116.017 908.462 115.95 908.545 
115.95H909.765C910.065 115.95 910.318 115.832 910.531 115.582L910.536 115.577L910.536 115.577C910.757 115.344 910.875 115.024 910.875 114.6V110.92C910.875 110.478 910.944 110.09 911.087 109.76L911.089 109.758C911.211 109.491 911.393 109.278 911.631 109.121C911.709 109.172 911.794 109.217 911.885 109.257L911.945 109.12L911.885 108.983C911.794 109.023 911.709 109.069 911.631 109.121C911.394 108.966 911.212 108.761 911.09 108.505L911.086 108.498L911.087 108.498C910.944 108.156 910.875 107.762 910.875 107.32V103.62C910.875 103.196 910.757 102.876 910.536 102.643L910.534 102.64L910.534 102.64C910.321 102.404 910.068 102.29 909.765 102.29H908.545C908.462 102.29 908.395 102.223 908.395 102.14V100.44ZM908.695 100.59V101.99H909.765C910.155 101.99 910.488 102.142 910.755 102.438C911.04 102.739 911.175 103.138 911.175 103.62V107.32C911.175 107.73 911.239 108.081 911.362 108.379C911.492 108.649 911.704 108.85 912.005 108.983C912.06 109.007 912.095 109.061 912.095 109.12C912.095 109.18 912.06 109.233 912.005 109.257C911.705 109.39 911.493 109.597 911.362 109.881C911.239 110.164 911.175 110.509 911.175 110.92V114.6C911.175 115.081 911.04 115.48 910.756 115.781C910.49 116.089 910.157 116.25 909.765 116.25H908.695V117.65H909.765C910.146 117.65 910.514 117.58 910.87 117.44C911.225 117.301 911.534 117.099 911.799 116.834L911.804 116.829L911.804 116.829C912.08 116.578 912.302 116.263 912.467 115.88L912.468 115.879C912.632 115.514 912.715 115.096 912.715 114.62V111.18C912.715 110.784 912.807 110.44 913 110.157C913.177 109.885 913.434 109.733 913.755 109.698V108.542C913.436 108.509 913.179 108.365 913.001 108.105L912.999 108.102L912.999 108.102C912.808 107.807 912.715 107.457 912.715 107.06V103.64C912.715 103.164 912.632 102.746 912.468 102.381L912.467 102.38C912.301 101.997 912.079 101.674 911.801 101.409L911.799 101.406L911.799 101.406C911.534 101.141 911.225 100.939 910.87 100.8C910.514 100.66 910.146 100.59 909.765 100.59H908.695ZM895.355 111.13C894.956 111.13 894.614 111.273 
894.321 111.566C894.028 111.859 893.885 112.201 893.885 112.6C893.885 112.999 894.028 113.341 894.321 113.634C894.614 113.927 894.956 114.07 895.355 114.07C895.754 114.07 896.096 113.927 896.389 113.634C896.682 113.341 896.825 112.999 896.825 112.6C896.825 112.201 896.682 111.859 896.389 111.566C896.096 111.273 895.754 111.13 895.355 111.13ZM894.109 111.354C894.456 111.007 894.874 110.83 895.355 110.83C895.836 110.83 896.254 111.007 896.601 111.354C896.948 111.701 897.125 112.119 897.125 112.6C897.125 113.081 896.948 113.499 896.601 113.846C896.254 114.193 895.836 114.37 895.355 114.37C894.874 114.37 894.456 114.193 894.109 113.846C893.762 113.499 893.585 113.081 893.585 112.6C893.585 112.119 893.762 111.701 894.109 111.354ZM900.492 111.13C900.093 111.13 899.75 111.273 899.458 111.566C899.165 111.859 899.022 112.201 899.022 112.6C899.022 112.999 899.165 113.341 899.458 113.634C899.75 113.927 900.093 114.07 900.492 114.07C900.891 114.07 901.233 113.927 901.526 113.634C901.818 113.341 901.962 112.999 901.962 112.6C901.962 112.201 901.818 111.859 901.526 111.566C901.233 111.273 900.891 111.13 900.492 111.13ZM899.246 111.354C899.593 111.007 900.011 110.83 900.492 110.83C900.973 110.83 901.39 111.007 901.738 111.354C902.085 111.701 902.262 112.119 902.262 112.6C902.262 113.081 902.085 113.499 901.738 113.846C901.39 114.193 900.973 114.37 900.492 114.37C900.011 114.37 899.593 114.193 899.246 113.846C898.898 113.499 898.722 113.081 898.722 112.6C898.722 112.119 898.898 111.701 899.246 111.354ZM905.628 111.13C905.229 111.13 904.887 111.273 904.594 111.566C904.302 111.859 904.158 112.201 904.158 112.6C904.158 112.999 904.302 113.341 904.594 113.634C904.887 113.927 905.229 114.07 905.628 114.07C906.027 114.07 906.37 113.927 906.662 113.634C906.955 113.341 907.098 112.999 907.098 112.6C907.098 112.201 906.955 111.859 906.662 111.566C906.37 111.273 906.027 111.13 905.628 111.13ZM904.382 111.354C904.73 111.007 905.147 110.83 905.628 110.83C906.109 110.83 906.527 111.007 906.874 
111.354C907.222 111.701 907.398 112.119 907.398 112.6C907.398 113.081 907.222 113.499 906.874 113.846C906.527 114.193 906.109 114.37 905.628 114.37C905.147 114.37 904.73 114.193 904.382 113.846C904.035 113.499 903.858 113.081 903.858 112.6C903.858 112.119 904.035 111.701 904.382 111.354Z" fill="#14648e"/>
</g>
<defs>
<clipPath id="clip0_308_3856">
<rect width="925" height="150" fill="white"/>
</clipPath>
</defs>
</svg>
" + }, + "logo": { + "image": "data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4NCjwhLS0gR2VuZXJhdG9yOiBBZG9iZSBJbGx1c3RyYXRvciAxOS4yLjEsIFNWRyBFeHBvcnQgUGx1Zy1JbiAuIFNWRyBWZXJzaW9uOiA2LjAwIEJ1aWxkIDApICAtLT4NCjxzdmcgdmVyc2lvbj0iMS4xIiBpZD0ibGF5ZXIiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiIHg9IjBweCIgeT0iMHB4Ig0KCSB2aWV3Qm94PSIwIDAgNjUyIDY1MiIgc3R5bGU9ImVuYWJsZS1iYWNrZ3JvdW5kOm5ldyAwIDAgNjUyIDY1MjsiIHhtbDpzcGFjZT0icHJlc2VydmUiPg0KPHN0eWxlIHR5cGU9InRleHQvY3NzIj4NCgkuc3Qwe2ZpbGw6bm9uZTt9DQoJLnN0MXtmaWxsOiMwMEFBNEU7fQ0KPC9zdHlsZT4NCjxyZWN0IGlkPSJiYWNrZ3JvdW5kcmVjdCIgeD0iMjAuMyIgeT0iMjU3LjUiIGNsYXNzPSJzdDAiIHdpZHRoPSI2MDAiIGhlaWdodD0iMTI4LjUiLz4NCjxnPg0KCTxnIGlkPSJzdmdfMSI+DQoJCTxwYXRoIGlkPSJzdmdfMiIgY2xhc3M9InN0MSIgZD0iTTExNC43LDM3NC4xYzQuNiw1LjksMTIuNCw4LjUsMTguOSw4LjVjOS4xLDAsMTUuNi02LjIsMTUuNi0xMy4zdi05NS4zYzAtNy41LTYuMi0xMy4zLTEzLjMtMTMuMw0KCQkJaC0wLjdjLTcuNSwwLTEzLjMsNi4yLTEzLjMsMTMuM3Y2OS4zTDYwLDI2OS4xYy00LjYtNS45LTEyLjQtOC41LTE4LjktOC41Yy05LjEsMC0xNS42LDYuMi0xNS42LDEzLjN2OTUuMw0KCQkJYzAsNy41LDYuMiwxMy4zLDEzLjMsMTMuM2gwLjdjNy41LDAsMTMuMy02LjIsMTMuMy0xMy4zVjMwMEwxMTQuNywzNzQuMXoiLz4NCgkJPGcgaWQ9InN2Z18zIj4NCgkJCTxwYXRoIGlkPSJzdmdfNCIgY2xhc3M9InN0MSIgZD0iTTYxMC4zLDM1OS4xbC0zNy43LTM3LjdsMzcuNy0zNy43YzUuMi01LjIsNS4yLTEzLjcsMC0xOC45bC0wLjctMC43Yy01LjItNS4yLTEzLjctNS4yLTE4LjksMA0KCQkJCUw1NTcuOSwyOTdsLTIuMywyLjNsLTEuMywxLjNjLTAuNywwLjctMS4zLDAuNy0yLDBsLTEuNi0xLjZsLTItMkw1MTYsMjY0LjJjLTUuMi01LjItMTMuNy01LjItMTguOSwwbC0wLjcsMC43DQoJCQkJYy01LjIsNS4yLTUuMiwxMy43LDAsMTguOWwzNy4xLDM3LjdsLTM3LjcsMzcuN2MtNS4yLDUuMi01LjIsMTMuNywwLDE4LjlsMC43LDAuN2M1LjIsNS4yLDEzLjcsNS4yLDE4LjksMGwzMi44LTMyLjhsMi4zLTIuMw0KCQkJCWwxLjMtMS4zYzAuNy0wLjcsMS4zLTAuNywyLDBsMSwxbDIuOSwyLjlsMzIuOCwzMi44YzUuMiw1LjIsMTMuNyw1LjIsMTguOSwwbDAuNy0wLjdDNjE1LjUsMzcyLjgsNjE1LjUsMzY0LjMsNjEwLjMsMzU5LjENCgkJCQlMNjEwLjMsMzU5LjF6Ii8+DQoJCTwvZz4NCgkJPHBhdGggaWQ9InN2Z181IiBjbGFzcz0ic3QxIiBkPSJNMzg4LjgsMzc0LjFjLTQuNiw1LjktMTIuNCw4LjU
tMTguOSw4LjVjLTkuMSwwLTE1LjYtNi4yLTE1LjYtMTMuM3YtOTUuMw0KCQkJYzAtNy41LDYuMi0xMy4zLDEzLjMtMTMuM2gwLjdjNy41LDAsMTMuMyw2LjIsMTMuMywxMy4zdjY5LjNsNjEuOC03NC4xYzQuNi01LjksMTIuNC04LjUsMTguOS04LjVjOS4xLDAsMTUuNiw2LjIsMTUuNiwxMy4zdjk1LjMNCgkJCWMwLDcuNS02LjIsMTMuMy0xMy4zLDEzLjNoLTAuN2MtNy41LDAtMTMuMy02LjItMTMuMy0xMy4zVjMwMEwzODguOCwzNzQuMUwzODguOCwzNzQuMXoiLz4NCgkJPHBhdGggaWQ9InN2Z182IiBjbGFzcz0ic3QxIiBkPSJNMzM1LjgsMjczLjNjMC03LjItNS41LTEzLTEyLTEzcy0xMiw1LjktMTIsMTN2MzEuMmMwLDAuNywwLjcsMS4zLDEuMywxLjNoMjEuNQ0KCQkJYzAuNywwLDEuMy0wLjcsMS4zLTEuM0MzMzUuOCwzMDQuNSwzMzUuOCwyNzMuMywzMzUuOCwyNzMuM3oiLz4NCgkJPHBhdGggaWQ9InN2Z183IiBjbGFzcz0ic3QxIiBkPSJNMzM1LjgsMzE4LjVjMC0wLjctMC43LTEuMy0xLjMtMS4zSDMxM2MtMC43LDAtMS4zLDAuNy0xLjMsMS4zdjUxLjRjMCw2LjgsNS41LDEyLjcsMTIsMTIuNw0KCQkJczEyLTUuNSwxMi0xMi43QzMzNS44LDM2OS45LDMzNS44LDMxOC41LDMzNS44LDMxOC41eiIvPg0KCQk8cGF0aCBpZD0ic3ZnXzgiIGNsYXNzPSJzdDEiIGQ9Ik0zMDAuNywzMTcuMmgtNzYuN2MtNy4yLDAtMTMsNC45LTEzLDExLjdjMCw2LjgsNS45LDExLjQsMTMsMTEuNGgzOC43bC0xMC40LDE3LjYNCgkJCWMtMC43LDEuMy0xLjYsMS42LTIuOSwxLjZoLTM3LjdjLTEuNiwwLTIuMy0wLjMtMi45LTEuNmwtMTkuMi0zNS4xYy0wLjctMS0wLjctMiwwLTMuM2wxOS4yLTM0LjFjMC43LTEuMywxLjYtMS42LDIuOS0xLjZoMzcuNw0KCQkJYzEuNiwwLDIuMywwLjMsMy4zLDEuNmwxMS43LDE5LjVjMC43LDEsMSwxLjMsMi4zLDEuM2gyNi43bC0yNS43LTQzLjNjLTAuNy0xLTEuMy0xLjMtMi42LTEuM0gxOTZjLTEuNiwwLTIuMywwLjctMi45LDINCgkJCWwtMzAuMiw1NS42Yy0xLDEuNi0xLDIuNiwwLDQuMmwzMS45LDU3LjJjMC43LDEuMywxLjYsMS42LDMuMywxLjZoNjZjMS42LDAsMi42LTAuMywzLjMtMkMyNjcuMiwzODAuMywzMDAuNywzMTcuMiwzMDAuNywzMTcuMnoiDQoJCQkvPg0KCTwvZz4NCjwvZz4NCjwvc3ZnPg0K", + "altText": "Al logo text" + } + }, + "layout": { + "style": "DEFAULT", + "pages": { + "apiCatalog": { + "description": "As concisely as possible, tell users what this API does. Phasellus non eleifend odio, eu posuere mi. Nulla efficitur at elit eget rutrum." + }, + "home": { + "aboutTile": { + "description": "Our developer portal showcases our APIs and their capabilities. 
It is a hub to help you build and deploy innovative services.", + "image": {}, + "title": "About the Portal" + }, + "gettingStarted": { + "description": "You can also add a description", + "tiles": { + "1": { + "description": "Proin pellentesque, ipsum vel aliquam lacinia.Vestibulum sem eros, sagittis vel dui.", + "image": {}, + "title": "Register" + }, + "2": { + "description": "Proin pellentesque, ipsum vel aliquam lacinia.Vestibulum sem eros, sagittis vel dui.", + "image": {}, + "title": "Explore" + }, + "3": { + "description": "Proin pellentesque, ipsum vel aliquam lacinia.Vestibulum sem eros, sagittis vel dui.", + "image": {}, + "title": "Build" + }, + "4": { + "description": "Proin pellentesque, ipsum vel aliquam lacinia.Vestibulum sem eros, sagittis vel dui.", + "image": {}, + "title": "Launch" + } + }, + "title": "Ready to get started?" + }, + "heroTitle": "Welcome to Your Portal Super Devportal", + "introTile": { + "description": "When you build applications with our APIs, you can transform experiences, help drive efficiency, and increase competitiveness. Come join us and create smart solutions.", + "image": {}, + "title": "Our APIs" + }, + "secondaryHeroTitle": "Another title", + "tile3": { + "description": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus non eleifend odio, eu posuere mi. 
Nulla efficitur at elit eget rutrum.", + "image": {}, + "title": "Partner with us" + } + }, + "links": { + "footer": [ + { + "href": "http://example.com", + "label": "Configure Footer" + } + ] + }, + "overview": [ + { + "label": "Get Started", + "markdown": "IyBHZXQgU3RhcnRlZA0KVGhpcyBpcyB3aGVyZSB1c2VycyBjb3VsZCB0ZWxsIGN1c3RvbWVycyBhZ2FpbiBob3cgYXdlc29tZSBvdXIgQVBJcyBhcmUgYW5kIHdoeSB0aGVzZSBhcmUgdGhlIHJpZ2h0IEFQSXMgZm9yIHRoZWlyIHByb2R1Y3RzLg0KDQojIyMgSW50cm9kdWN0aW9uDQpMb3JlbSBpcHN1bSBkb2xvciBzaXQgYW1ldCwgY29uc2VjdGV0dXIgYWRpcGlzY2luZyBlbGl0LiBQaGFzZWxsdXMgbm9uIGVsZWlmZW5kIG9kaW8sIGV1IHBvc3VlcmUgbWkuIE51bGxhIGVmZmljaXR1ciBhdCBlbGl0IGVnZXQgcnV0cnVtLiBJbiBtb2xlc3RpZSBwaGFyZXRyYSBsaWd1bGEgZGlnbmlzc2ltIGV1aXNtb2QuIEluIGF1Y3RvciBuaXNsIHZpdGFlIGVyb3MgdmVuZW5hdGlzIGNvbmRpbWVudHVtLiBTdXNwZW5kaXNzZSBjb25kaW1lbnR1bSB1bHRyaWNlcyBxdWFtIGV0IHZpdmVycmEuIEluIGFsaXF1ZXQgdG9ydG9yIG1heGltdXMgdG9ydG9yIGRpY3R1bSwgZXUgdGluY2lkdW50IG1ldHVzIGZyaW5naWxsYS4gUHJhZXNlbnQgZWdldCBjb25zZWN0ZXR1ciBkdWkuIENyYXMgZXQgbWV0dXMgaWFjdWxpcywgc2FnaXR0aXMgbGliZXJvIHNpdCBhbWV0LCBkaWduaXNzaW0gbmlzaS4gU3VzcGVuZGlzc2UgbWFsZXN1YWRhIGVsaXQgdm9sdXRwYXQsIHN1c2NpcGl0IHVybmEgZWdldCwgZmVybWVudHVtIGxhY3VzLg0KDQojIyMgSG93IHRvIGdldCBzdGFydGVkDQpMb3JlbSBpcHN1bSBkb2xvciBzaXQgYW1ldCwgY29uc2VjdGV0dXIgYWRpcGlzY2luZyBlbGl0LiBQaGFzZWxsdXMgbm9uIGVsZWlmZW5kIG9kaW8sIGV1IHBvc3VlcmUgbWkuIE51bGxhIGVmZmljaXR1ciBhdCBlbGl0IGVnZXQgcnV0cnVtLiBJbiBtb2xlc3RpZSBwaGFyZXRyYSBsaWd1bGEgZGlnbmlzc2ltIGV1aXNtb2QuIEluIGF1Y3RvciBuaXNsIHZpdGFlIGVyb3MgdmVuZW5hdGlzIGNvbmRpbWVudHVtLiBTdXNwZW5kaXNzZSBjb25kaW1lbnR1bSB1bHRyaWNlcyBxdWFtIGV0IHZpdmVycmEuIEluIGFsaXF1ZXQgdG9ydG9yIG1heGltdXMgdG9ydG9yIGRpY3R1bSwgZXUgdGluY2lkdW50IG1ldHVzIGZyaW5naWxsYS4gUHJhZXNlbnQgZWdldCBjb25zZWN0ZXR1ciBkdWkuIENyYXMgZXQgbWV0dXMgaWFjdWxpcywgc2FnaXR0aXMgbGliZXJvIHNpdCBhbWV0LCBkaWduaXNzaW0gbmlzaS4gU3VzcGVuZGlzc2UgbWFsZXN1YWRhIGVsaXQgdm9sdXRwYXQsIHN1c2NpcGl0IHVybmEgZWdldCwgZmVybWVudHVtIGxhY3VzLg==" + }, + { + "label": "Authentication", + "markdown": 
"IyBBdXRoZW50aWNhdGlvbg0KVGhpcyBpcyB3aGVyZSB1c2VycyBjb3VsZCB0ZWxsIGN1c3RvbWVycyBhZ2FpbiBob3cgYXdlc29tZSBvdXIgQVBJcyBhcmUgYW5kIHdoeSB0aGVzZSBhcmUgdGhlIHJpZ2h0IEFQSXMgZm9yIHRoZWlyIHByb2R1Y3RzLg0KDQojIyMgSW50cm9kdWN0aW9uDQpMb3JlbSBpcHN1bSBkb2xvciBzaXQgYW1ldCwgY29uc2VjdGV0dXIgYWRpcGlzY2luZyBlbGl0LiBQaGFzZWxsdXMgbm9uIGVsZWlmZW5kIG9kaW8sIGV1IHBvc3VlcmUgbWkuIE51bGxhIGVmZmljaXR1ciBhdCBlbGl0IGVnZXQgcnV0cnVtLiBJbiBtb2xlc3RpZSBwaGFyZXRyYSBsaWd1bGEgZGlnbmlzc2ltIGV1aXNtb2QuIEluIGF1Y3RvciBuaXNsIHZpdGFlIGVyb3MgdmVuZW5hdGlzIGNvbmRpbWVudHVtLiBTdXNwZW5kaXNzZSBjb25kaW1lbnR1bSB1bHRyaWNlcyBxdWFtIGV0IHZpdmVycmEuIEluIGFsaXF1ZXQgdG9ydG9yIG1heGltdXMgdG9ydG9yIGRpY3R1bSwgZXUgdGluY2lkdW50IG1ldHVzIGZyaW5naWxsYS4gUHJhZXNlbnQgZWdldCBjb25zZWN0ZXR1ciBkdWkuIENyYXMgZXQgbWV0dXMgaWFjdWxpcywgc2FnaXR0aXMgbGliZXJvIHNpdCBhbWV0LCBkaWduaXNzaW0gbmlzaS4gU3VzcGVuZGlzc2UgbWFsZXN1YWRhIGVsaXQgdm9sdXRwYXQsIHN1c2NpcGl0IHVybmEgZWdldCwgZmVybWVudHVtIGxhY3VzLg0KDQojIyMgSG93IHRvIGdldCBzdGFydGVkDQpMb3JlbSBpcHN1bSBkb2xvciBzaXQgYW1ldCwgY29uc2VjdGV0dXIgYWRpcGlzY2luZyBlbGl0LiBQaGFzZWxsdXMgbm9uIGVsZWlmZW5kIG9kaW8sIGV1IHBvc3VlcmUgbWkuIE51bGxhIGVmZmljaXR1ciBhdCBlbGl0IGVnZXQgcnV0cnVtLiBJbiBtb2xlc3RpZSBwaGFyZXRyYSBsaWd1bGEgZGlnbmlzc2ltIGV1aXNtb2QuIEluIGF1Y3RvciBuaXNsIHZpdGFlIGVyb3MgdmVuZW5hdGlzIGNvbmRpbWVudHVtLiBTdXNwZW5kaXNzZSBjb25kaW1lbnR1bSB1bHRyaWNlcyBxdWFtIGV0IHZpdmVycmEuIEluIGFsaXF1ZXQgdG9ydG9yIG1heGltdXMgdG9ydG9yIGRpY3R1bSwgZXUgdGluY2lkdW50IG1ldHVzIGZyaW5naWxsYS4gUHJhZXNlbnQgZWdldCBjb25zZWN0ZXR1ciBkdWkuIENyYXMgZXQgbWV0dXMgaWFjdWxpcywgc2FnaXR0aXMgbGliZXJvIHNpdCBhbWV0LCBkaWduaXNzaW0gbmlzaS4gU3VzcGVuZGlzc2UgbWFsZXN1YWRhIGVsaXQgdm9sdXRwYXQsIHN1c2NpcGl0IHVybmEgZWdldCwgZmVybWVudHVtIGxhY3VzLg==" + }, + { + "label": "Responses", + "markdown": 
"IyBSZXNwb25zZXMNClRoaXMgaXMgd2hlcmUgdXNlcnMgY291bGQgdGVsbCBjdXN0b21lcnMgYWdhaW4gaG93IGF3ZXNvbWUgb3VyIEFQSXMgYXJlIGFuZCB3aHkgdGhlc2UgYXJlIHRoZSByaWdodCBBUElzIGZvciB0aGVpciBwcm9kdWN0cy4NCg0KIyMjIEludHJvZHVjdGlvbg0KTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gUGhhc2VsbHVzIG5vbiBlbGVpZmVuZCBvZGlvLCBldSBwb3N1ZXJlIG1pLiBOdWxsYSBlZmZpY2l0dXIgYXQgZWxpdCBlZ2V0IHJ1dHJ1bS4gSW4gbW9sZXN0aWUgcGhhcmV0cmEgbGlndWxhIGRpZ25pc3NpbSBldWlzbW9kLiBJbiBhdWN0b3IgbmlzbCB2aXRhZSBlcm9zIHZlbmVuYXRpcyBjb25kaW1lbnR1bS4gU3VzcGVuZGlzc2UgY29uZGltZW50dW0gdWx0cmljZXMgcXVhbSBldCB2aXZlcnJhLiBJbiBhbGlxdWV0IHRvcnRvciBtYXhpbXVzIHRvcnRvciBkaWN0dW0sIGV1IHRpbmNpZHVudCBtZXR1cyBmcmluZ2lsbGEuIFByYWVzZW50IGVnZXQgY29uc2VjdGV0dXIgZHVpLiBDcmFzIGV0IG1ldHVzIGlhY3VsaXMsIHNhZ2l0dGlzIGxpYmVybyBzaXQgYW1ldCwgZGlnbmlzc2ltIG5pc2kuIFN1c3BlbmRpc3NlIG1hbGVzdWFkYSBlbGl0IHZvbHV0cGF0LCBzdXNjaXBpdCB1cm5hIGVnZXQsIGZlcm1lbnR1bSBsYWN1cy4NCg0KIyMjIEhvdyB0byBnZXQgc3RhcnRlZA0KTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gUGhhc2VsbHVzIG5vbiBlbGVpZmVuZCBvZGlvLCBldSBwb3N1ZXJlIG1pLiBOdWxsYSBlZmZpY2l0dXIgYXQgZWxpdCBlZ2V0IHJ1dHJ1bS4gSW4gbW9sZXN0aWUgcGhhcmV0cmEgbGlndWxhIGRpZ25pc3NpbSBldWlzbW9kLiBJbiBhdWN0b3IgbmlzbCB2aXRhZSBlcm9zIHZlbmVuYXRpcyBjb25kaW1lbnR1bS4gU3VzcGVuZGlzc2UgY29uZGltZW50dW0gdWx0cmljZXMgcXVhbSBldCB2aXZlcnJhLiBJbiBhbGlxdWV0IHRvcnRvciBtYXhpbXVzIHRvcnRvciBkaWN0dW0sIGV1IHRpbmNpZHVudCBtZXR1cyBmcmluZ2lsbGEuIFByYWVzZW50IGVnZXQgY29uc2VjdGV0dXIgZHVpLiBDcmFzIGV0IG1ldHVzIGlhY3VsaXMsIHNhZ2l0dGlzIGxpYmVybyBzaXQgYW1ldCwgZGlnbmlzc2ltIG5pc2kuIFN1c3BlbmRpc3NlIG1hbGVzdWFkYSBlbGl0IHZvbHV0cGF0LCBzdXNjaXBpdCB1cm5hIGVnZXQsIGZlcm1lbnR1bSBsYWN1cy4=" + }, + { + "label": "Errors", + "markdown": 
"IyBFcnJvcnMNClRoaXMgaXMgd2hlcmUgdXNlcnMgY291bGQgdGVsbCBjdXN0b21lcnMgYWdhaW4gaG93IGF3ZXNvbWUgb3VyIEFQSXMgYXJlIGFuZCB3aHkgdGhlc2UgYXJlIHRoZSByaWdodCBBUElzIGZvciB0aGVpciBwcm9kdWN0cy4NCg0KIyMjIEludHJvZHVjdGlvbg0KTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gUGhhc2VsbHVzIG5vbiBlbGVpZmVuZCBvZGlvLCBldSBwb3N1ZXJlIG1pLiBOdWxsYSBlZmZpY2l0dXIgYXQgZWxpdCBlZ2V0IHJ1dHJ1bS4gSW4gbW9sZXN0aWUgcGhhcmV0cmEgbGlndWxhIGRpZ25pc3NpbSBldWlzbW9kLiBJbiBhdWN0b3IgbmlzbCB2aXRhZSBlcm9zIHZlbmVuYXRpcyBjb25kaW1lbnR1bS4gU3VzcGVuZGlzc2UgY29uZGltZW50dW0gdWx0cmljZXMgcXVhbSBldCB2aXZlcnJhLiBJbiBhbGlxdWV0IHRvcnRvciBtYXhpbXVzIHRvcnRvciBkaWN0dW0sIGV1IHRpbmNpZHVudCBtZXR1cyBmcmluZ2lsbGEuIFByYWVzZW50IGVnZXQgY29uc2VjdGV0dXIgZHVpLiBDcmFzIGV0IG1ldHVzIGlhY3VsaXMsIHNhZ2l0dGlzIGxpYmVybyBzaXQgYW1ldCwgZGlnbmlzc2ltIG5pc2kuIFN1c3BlbmRpc3NlIG1hbGVzdWFkYSBlbGl0IHZvbHV0cGF0LCBzdXNjaXBpdCB1cm5hIGVnZXQsIGZlcm1lbnR1bSBsYWN1cy4NCg0KIyMjIEhvdyB0byBnZXQgc3RhcnRlZA0KTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gUGhhc2VsbHVzIG5vbiBlbGVpZmVuZCBvZGlvLCBldSBwb3N1ZXJlIG1pLiBOdWxsYSBlZmZpY2l0dXIgYXQgZWxpdCBlZ2V0IHJ1dHJ1bS4gSW4gbW9sZXN0aWUgcGhhcmV0cmEgbGlndWxhIGRpZ25pc3NpbSBldWlzbW9kLiBJbiBhdWN0b3IgbmlzbCB2aXRhZSBlcm9zIHZlbmVuYXRpcyBjb25kaW1lbnR1bS4gU3VzcGVuZGlzc2UgY29uZGltZW50dW0gdWx0cmljZXMgcXVhbSBldCB2aXZlcnJhLiBJbiBhbGlxdWV0IHRvcnRvciBtYXhpbXVzIHRvcnRvciBkaWN0dW0sIGV1IHRpbmNpZHVudCBtZXR1cyBmcmluZ2lsbGEuIFByYWVzZW50IGVnZXQgY29uc2VjdGV0dXIgZHVpLiBDcmFzIGV0IG1ldHVzIGlhY3VsaXMsIHNhZ2l0dGlzIGxpYmVybyBzaXQgYW1ldCwgZGlnbmlzc2ltIG5pc2kuIFN1c3BlbmRpc3NlIG1hbGVzdWFkYSBlbGl0IHZvbHV0cGF0LCBzdXNjaXBpdCB1cm5hIGVnZXQsIGZlcm1lbnR1bSBsYWN1cy4=" + }, + { + "label": "1", + "markdown": 
"VGhlIGhpc3Rvcnkgb2YgVGhlIEhpdGNoaGlrZXIncyBHdWlkZSB0byB0aGUgR2FsYXh5IGlzIG5vdyBzbyAKY29tcGxpY2F0ZWQgdGhhdCBldmVyeSB0aW1lIEkgdGVsbCBpdCBJIGNvbnRyYWRpY3QgbXlzZWxmLCBhbmQgd2hlbmV2ZXIgCkkgZG8gZ2V0IGl0IHJpZ2h0IEknbSBtaXNxdW90ZWQuIFNvIHRoZSBwdWJsaWNhdGlvbiBvZiB0aGlzIG9tbmlidXMgCmVkaXRpb24gc2VlbWVkIGxpa2UgYSBnb29kIG9wcG9ydHVuaXR5IHRvIHNldCB0aGUgcmVjb3JkIHN0cmFpZ2h0IC0gb3IgCmF0IGxlYXN0IGZpcm1seSBjcm9va2VkLiBBbnl0aGluZyB0aGF0IGlzIHB1dCBkb3duIHdyb25nIGhlcmUgaXMsIGFzIGZhciAKYXMgSSdtIGNvbmNlcm5lZCwgd3JvbmcgZm9yIGdvb2QuIAoKVGhlIGlkZWEgZm9yIHRoZSB0aXRsZSBmaXJzdCBjcm9wcGVkIHVwIHdoaWxlIEkgd2FzIGx5aW5nIGRydW5rIGluIGEgCmZpZWxkIGluIElubnNicnVjaywgQXVzdHJpYSwgaW4gMTk3MS4gTm90IHBhcnRpY3VsYXJseSBkcnVuaywganVzdCB0aGUgCnNvcnQgb2YgZHJ1bmsgeW91IGdldCB3aGVuIHlvdSBoYXZlIGEgY291cGxlIG9mIHN0aWZmIEdvc3NlcnMgYWZ0ZXIgCm5vdCBoYXZpbmcgZWF0ZW4gZm9yIHR3byBkYXlzIHN0cmFpZ2h0LCBvbiBhY2NvdW50IG9mIGJlaW5nIGEgCnBlbm5pbGVzcyBoaXRjaGhpa2VyLiBXZSBhcmUgdGFsa2luZyBvZiBhIG1pbGQgaW5hYmlsaXR5IHRvIHN0YW5kIHVwLiAKCkkgd2FzIHRyYXZlbGluZyB3aXRoIGEgY29weSBvZiB0aGUgSGl0Y2ggSGlrZXIgcyBHdWlkZSB0byBFdXJvcGUgYnkgCktlbiBXYWxzaCwgYSB2ZXJ5IGJhdHRlcmVkIGNvcHkgdGhhdCBJIGhhZCBib3Jyb3dlZCBmcm9tIHNvbWVvbmUuIAoKSW4gZmFjdCwgc2luY2UgdGhpcyB3YXMgMTk3MSBhbmQgSSBzdGlsbCBoYXZlIHRoZSBib29rLCBpdCBtdXN0IGNvdW50IGFzIApzdG9sZW4gYnkgbm93LiBJIGRpZG4ndCBoYXZlIGEgY29weSBvZiBFdXJvcGUgb24gRml2ZSBEb2xsYXJzIGEgRGF5IChhcyAKaXQgdGhlbiB3YXMpIGJlY2F1c2UgSSB3YXNuJ3QgaW4gdGhhdCBmaW5hbmNpYWwgbGVhZ3VlLiAKCk5pZ2h0IHdhcyBiZWdpbm5pbmcgdG8gZmFsbCBvbiBteSBmaWVsZCBhcyBpdCBzcHVuIGxhemlseSB1bmRlcm5lYXRoIAptZS4gSSB3YXMgd29uZGVyaW5nIHdoZXJlIEkgY291bGQgZ28gdGhhdCB3YXMgY2hlYXBlciB0aGFuIApJbm5zYnJ1Y2ssIHJldm9sdmVkIGxlc3MgYW5kIGRpZG4ndCBkbyB0aGUgc29ydCBvZiB0aGluZ3MgdG8gbWUgdGhhdCAKSW5uc2JydWNrIGhhZCBkb25lIHRvIG1lIHRoYXQgYWZ0ZXJub29uLiBXaGF0IGhhZCBoYXBwZW5lZCB3YXMgCnRoaXMuIEkgaGFkIGJlZW4gd2Fsa2luZyB0aHJvdWdoIHRoZSB0b3duIHRyeWluZyB0byBmaW5kIGEgcGFydGljdWxhciAKYWRkcmVzcywgYW5kIGJlaW5nIHRob3JvdWdobHkgbG9zdCBJIHN0b3BwZWQgdG8gYXNrIGZvciBkaXJlY3Rpb25zIApmcm9tIGEgbWFuIGluIHRoZSBzdHJ
lZXQuIEkga25ldyB0aGlzIG1pZ2h0bid0IGJlIGVhc3kgYmVjYXVzZSBJIGRvbid0IApzcGVhayBHZXJtYW4sIGJ1dCBJIHdhcyBzdGlsbCBzdXJwcmlzZWQgdG8gZGlzY292ZXIganVzdCBob3cgbXVjaCAKZGlmZmljdWx0eSBJIHdhcyBoYXZpbmcgY29tbXVuaWNhdGluZyB3aXRoIHRoaXMgcGFydGljdWxhciBtYW4uIApHcmFkdWFsbHkgdGhlIHRydXRoIGRhd25lZCBvbiBtZSBhcyB3ZSBzdHJ1Z2dsZWQgaW4gdmFpbiB0byAKdW5kZXJzdGFuZCBlYWNoIG90aGVyIHRoYXQgb2YgYWxsIHRoZSBwZW9wbGUgaW4gSW5uc2JydWNrIEkgY291bGQgaGF2ZSAKc3RvcHBlZCB0byBhc2ssIHRoZSBvbmUgSSBoYWQgcGlja2VkIGRpZCBub3Qgc3BlYWsgRW5nbGlzaCwgZGlkIG5vdCAKc3BlYWsgRnJlbmNoIGFuZCB3YXMgYWxzbyBkZWFmIGFuZCBkdW1iLiBXaXRoIGEgc2VyaWVzIG9mIHNpbmNlcmVseSAKYXBvbG9nZXRpYyBoYW5kIG1vdmVtZW50cywgSSBkaXNlbnRhbmdsZWQgbXlzZWxmLCBhbmQgYSBmZXcgCgoKCm1pbnV0ZXMgbGF0ZXIsIG9uIGFub3RoZXIgc3RyZWV0LCBJIHN0b3BwZWQgYW5kIGFza2VkIGFub3RoZXIgbWFuIAp3aG8gYWxzbyB0dXJuZWQgb3V0IHRvIGJlIGRlYWYgYW5kIGR1bWIsIHdoaWNoIHdhcyB3aGVuIEkgYm91Z2h0IAp0aGUgYmVlcnMuIAoKSSB2ZW50dXJlZCBiYWNrIG9udG8gdGhlIHN0cmVldC4gSSB0cmllZCBhZ2Fpbi4gCgpXaGVuIHRoZSB0aGlyZCBtYW4gSSBzcG9rZSB0byB0dXJuZWQgb3V0IHRvIGJlIGRlYWYgYW5kIGR1bWIgCmFuZCBhbHNvIGJsaW5kIEkgYmVnYW4gdG8gZmVlbCBhIHRlcnJpYmxlIHdlaWdodCBzZXR0bGluZyBvbiBteSAKc2hvdWxkZXJzOyB3aGVyZXZlciBJIGxvb2tlZCB0aGUgdHJlZXMgYW5kIGJ1aWxkaW5ncyB0b29rIG9uIGRhcmsgYW5kIAptZW5hY2luZyBhc3BlY3RzLiBJIHB1bGxlZCBteSBjb2F0IHRpZ2h0bHkgYXJvdW5kIG1lIGFuZCBodXJyaWVkIApsdXJjaGluZyBkb3duIHRoZSBzdHJlZXQsIHdoaXBwZWQgYnkgYSBzdWRkZW4gZ3VzdGluZyB3aW5kLiBJIApidW1wZWQgaW50byBzb21lb25lIGFuZCBzdGFtbWVyZWQgYW4gYXBvbG9neSwgYnV0IGhlIHdhcyBkZWFmIAphbmQgZHVtYiBhbmQgdW5hYmxlIHRvIHVuZGVyc3RhbmQgbWUuIFRoZSBza3kgbG91cmVkLiBUaGUgCnBhdmVtZW50IHNlZW1lZCB0byB0aXAgYW5kIHNwaW4uIElmIEkgaGFkbid0IGhhcHBlbmVkIHRoZW4gdG8gZHVjayAKZG93biBhIHNpZGUgc3RyZWV0IGFuZCBwYXNzIGEgaG90ZWwgd2hlcmUgYSBjb252ZW50aW9uIGZvciB0aGUgZGVhZiAKd2FzIGJlaW5nIGhlbGQsIHRoZXJlIGlzIGV2ZXJ5IGNoYW5jZSB0aGF0IG15IG1pbmQgd291bGQgaGF2ZSAKY3JhY2tlZCBjb21wbGV0ZWx5IGFuZCBJIHdvdWxkIGhhdmUgc3BlbnQgdGhlIHJlc3Qgb2YgbXkgbGlmZSB3cml0aW5nIAp0aGUgc29ydCBvZiBib29rcyBmb3Igd2hpY2ggS2Fma2EgYmVjYW1lIGZhbW91cyBhbmQgZHJpYmJsaW5nLiAK" 
+ } + ] + } + }, + "color": { + "main": { + "ink": "#0d0b0b", + "fill": "#92be7a", + "primary": "#e1f3ff", + "status": { + "info": "#8d9dda", + "success": "#00eb4d", + "error": "#901e09", + "warning": "#b5a260" + } + }, + "hero": { + "ink": "#000000", + "fill": "#E0F7FF", + "primary": "" + }, + "illustration": { + "ink": "#14648e", + "fill": "#bad8e7", + "primary": "" + }, + "header": { + "ink": "#000000", + "fill": "#FFFFFF", + "primary": "" + }, + "footer": { + "ink": "#FFFFFF", + "fill": "#035B96", + "primary": "" + } + }, + "fonts": { + "body": { + "kind": "google-web-font", + "value": "Ubuntu" + }, + "code": { + "kind": "google-web-font", + "value": "Source+Code+Pro" + }, + "special": { + "kind": "google-web-font", + "value": "Ubuntu" + } + } +} \ No newline at end of file diff --git a/content/nms/acm/getting-started/overview.md b/content/nms/acm/getting-started/overview.md new file mode 100644 index 000000000..fbad547df --- /dev/null +++ b/content/nms/acm/getting-started/overview.md @@ -0,0 +1,111 @@ +--- +description: Learn how to get up and running with F5 NGINX Management Suite API Connectivity + Manager. +docs: DOCS-939 +tags: +- docs +title: Get Started +toc: true +weight: 10 +--- + + + +{{< shortversions "1.1.0" "latest" "acmvers" >}} + +## Objectives + +By completing the guides in this Quick Start series, you can easily get up and running with API Connectivity Manager. + +This series covers the following topics: + +1. Setting up an environment with [API Gateway]({{< relref "add-api-gateway" >}}) & [Developer Portal]({{< relref "add-devportal" >}}) clusters. +2. Onboarding F5 NGINX Plus instances onto the clusters. +3. [Publishing an API proxy]({{< relref "publish-api-proxy" >}}) with or without an OpenAPI spec. + +--- + +## Requirements + +To complete the instructions in this series, you must meet the following requirements: + +1. 
[Install API Connectivity Manager and Developer Portal]({{< relref "/nim/deploy/_index.md" >}}) on [separate hosts]({{< relref "tech-specs.md" >}}). +2. [Install a supported version of NGINX Plus]({{< relref "tech-specs.md" >}}) on one or more hosts to serve as the API Gateway. +3. [Install the `njs` module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/nginscript/) on your NGINX data plane and Dev Portal hosts. +4. You have SSH access and `sudo` permissions for the NGINX data plane host(s). +5. You have an API Connectivity Manager user account with permission to READ, CREATE, UPDATE, and DELETE on the following API Connectivity Manager features: + + - Infrastructure + - Services + +--- + +## How to Access the REST API + +{{< include "acm/how-to/access-acm-api" >}} + +For example: + +```shell +curl --location --request POST 'https://{{nms-fqdn}}/api/acm/v1/services/workspaces/{{workspaceName}}' \ +--header 'Accept: application/json' \ +--header 'Content-Type: application/json' \ +--header 'Authorization: Bearer ' \ +--data-raw '' +``` + +--- + +## Variables + +The examples provided in these guides use a set of common variables. +You should replace these variables -- or define a set of values for them in your API client -- with information relevant to your environment before trying to use them. +The variables used in the examples are described in the table below. + + +{{< bootstrap-table "table table-hover table-bordered" >}} +|Variable | Definition | +|---|-------| +| `nms-fqdn`| The fully-qualified domain name (FQDN) or IP address of the host running NGINX Management Suite.
    This is also referred to as the "management plane" host. | +| `backendIp` | The IP address or hostname of a backend server running an API service. | +| `nginxInstance` | The IP address or hostname of an NGINX data plane instance. | +| `devPortalIp` | The IP address or hostname of the instance hosting the Developer Portal. | +| `username` | Your account username. | +| `password` | Your account password. | +| `instanceGroupName` | The name of the API Gateway. This name is recorded as an Instance Group name by the NGINX Agent. | +| `infraWorkspaceName` | The name of the Infrastructure Workspace that you want to work in. | +| `proxyWorkspaceName` | The name of the Service Workspace that you want to work in. | +| `proxyName` | The name of the Proxy that you want to create, read, update, or delete. | +| `environmentName` | The name of the Environment that you want to work in. | +| `environmentHostname` | The hostname of the API Gateway. | +| `devPortalName` | The resource name of the Developer Portal Proxy. | +| `portalDocsName` | The resource name of the API Docs. | +| `portalInstanceGroupName` | The resource name of the Developer Portal. | +| `portalClusterHostname` | The hostname for the Developer Portal. | +| `clusterName` | The proxy cluster name for the Developer Portal or API Gateway. | + +{{< /bootstrap-table >}} + diff --git a/content/nms/acm/getting-started/publish-api-proxy.md b/content/nms/acm/getting-started/publish-api-proxy.md new file mode 100644 index 000000000..7c52353c9 --- /dev/null +++ b/content/nms/acm/getting-started/publish-api-proxy.md @@ -0,0 +1,498 @@ +--- +description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to publish + an API Proxy. +docs: DOCS-923 +tags: +- docs +title: Publish an API Proxy +toc: true +weight: 300 +--- + +{{< shortversions "1.1.0" "latest" "acmvers" >}} + +## Overview + +In API Connectivity Manager, **Services** represent your Backend APIs. 
+**Proxies** represent the NGINX reverse proxy that routes traffic to your backend service and to the Developer Portal. +This guide provides instructions and examples for publishing an API and a Developer Portal by using the REST API. + +### Before You Begin + +You should complete the following Quick Start Guides before proceeding with the steps in this guide: + +1. [Set Up an API Gateway Environment]({{< relref "add-api-gateway" >}}) +1. [Set Up a Developer Portal Environment]({{< relref "add-devportal" >}}) + +## Create a service workspace +Services workspaces is a logical grouping of APIs. A user can created multiple workspaces that match an organizational structure. + +{{}} + +{{%tab name="UI"%}} + +1. Select the **Services** option on the left hand menu. +1. Select the **Create Workspace** button. +1. Enter a name. +1. (Optional) Provide a description of the workspace. +1. (Optional) Select the **Contact Information** check box to provide contact details. +1. Select the **Create** button. + + +{{%/tab%}} +{{%tab name="API"%}} + + + +{{}} + +| Method | Endpoint | +|-------------|----------| +| POST| `/services/workspaces`| + +{{}} + + +```json +{ + "name": "{{proxyWorkspaceName}}", + "metadata": { + "description": "Petstore Team Workspace" + }, + "contactDetails": { + "adminEmail": "admin@example.com", + "adminName": "I.M. Admin", + "adminPhone": "555 123 1234" + } +} +``` + +{{%/tab%}} +{{}} + + + + + +## Publish API Proxy without OpenAPI Spec {#set-up-api-proxy} + +An API proxy connects the backend services to the API-Gateway. + +{{}} + +{{%tab name="UI"%}} + +After creating the workspace, you can select **Publish API Proxy** or open the previously created workspace. + +On the Publish API Proxy window: +### Backend Service + +1. Type a name for the backend service. +1. Type the **Service Target Hostname**; this can be an IP or FQDN. +1. For the **Service Target Transport Protocol**, if your backend service is using gRPC, then select gRPC. +1. 
Type the **Service Target Port**, or use the arrow buttons to increase or decrease the port number. + +### API Proxy + +1. Type a name for the API Proxy. +1. Select No in the **Use an OpenAPI spec** option. +1. Select the **Gateway Proxy Hostname from** the menu. +{{< note >}}If this field is disabled, check the job status of your environment on the infrastructure workspace page.{{< /note >}} + +### Ingress + +1. Enter the Base Path that you wish to route traffic to. +1. Type the version of your API. +1. Select **Publish**. + +### Confirm Setup + +1. Open a terminal application. +1. Run the following command: + + ```curl + curl -k -X GET "https://gateway-proxy-hostname/version/basepath" + ``` + +1. If your proxy is set up correctly, you can send traffic. + +{{< note >}}By default the ingress append rule is set to `PREFIX` so your request must be in the form of `version/basepath` {{< /note >}} + + + + + +{{%/tab%}} +{{%tab name="API"%}} +After creating the service workspace, you can select **Publish API Proxy**, or you can follow these steps: + + +{{}} + +| Method | Endpoint | +|-------------|----------| +| POST | `/services/workspaces/{{proxyWorkspaceName}}/proxies`| + +{{}} + + + + +The basic configuration below creates an API Proxy to a backend service. + +```json +{ + "name": "{{proxyName}}", + "metadata": { + "description": "Swagger Petstore Proxy" + }, + "version": "v1", + "proxyConfig": { + "hostname": "{{environmentHostname}}", + "ingress": { + "basePath": "/v1" + }, + "backends": [ + { + "serviceName": "petstore-api-svc", + "serviceTargets": [ + { + "hostname": "petstore.example.com" + } + ] + } + ] + } +} +``` + +{{%/tab%}} +{{}} + + + +## Publish API Proxy with OpenAPI Spec {#publish-api-proxy-with-spec} +{{< include "acm/openapi-support" >}} + +### Uploading an OAS Schema +OAS Schemas can be uploaded to API Connectivity Manager and stored for use as references for *Proxy* deployments. 
+The routes contained in the OAS Schema will be used to create the routes for your *Proxy* + +### Creating a Proxy with an OAS +After you have uploaded your OAS Schema as an *API Doc*, you can then reference that *API Doc* in your *Proxy* deployments using the `specRef` parameter in the JSON payload. +Using the `specRef` will then associate that OAS Schema in API Connectivity Manager and allow API Connectivity Manager to create your routes from the information contained in the OAS Schema. + +### Extended support for OAS in API Connectivity Manager +API Connectivity Manager now allows you to set up an API gateway using Open API Specification by supporting the creation of *Backends* (upstream servers) from the supplied OAS using an API Connectivity Manager specific *x-* extension in your OAS document. +API Connectivity Manager now also supports server URL templating in the global URL(s). + +
    +Example JSON + +```json +"servers": [ + { + "url": "https://{server}.example.com/api/{version}", + "variables": { + "version": { + "default": "v1" + }, + "server": { + "default": "staging" + } + }, + "x-acm-append-rule": "NONE", + "x-acm-strip-basepath": false, + "x-acm-backends": [ + { + "serviceName": "pets-backend", + "serviceVersion": "pets-backend-v1", + "serviceLabel": "default", + "contextRoot": "/dev", + "upstreams": [ + { + "url": "https://gecho1.null.ie", + "maxFails": 10, + "maxConnections": 5, + "failTimeout": "5s", + "slowStart": "10s" + }, + { + "url": "https://gecho2.null.ie", + "maxFails": 5, + "maxConnections": 8, + "failTimeout": "15s", + "slowStart": "3s" + }, + { + "url": "https://gecho3.null.ie", + "maxFails": 7, + "maxConnections": 33, + "failTimeout": "35s", + "slowStart": "1s" + } + ] + } + ] + } +], +``` + +
    + +  + + +### Server URL Templating + +```json +"servers": [ + { + "url": "https://{server}.example.com/api/{version}", + "variables": { + "version": { + "default": "v1" + }, + "server": { + "default": "staging" + } + }, +``` + +In the above section, we can see how server URL templating will make substitutions with a matching value from the variables section of the server object in the specification. +Each placeholder in the URL *must* have a matching variable in the variables section or the validation will fail and return an error. + +### Creating Backends +This section explains how to create a backend target for our API Gateway configuration, a Backend is a collection of upstream servers bundled under one "Service label". +An API Gateway can have multiple *Backends* which can each contain multiple upstream servers. + +```json +"x-acm-backends": [ + { + "serviceName": "pets-backend", + "serviceVersion": "pets-backend-v1", + "serviceLabel": "default", + "contextRoot": "/dev", + "upstreams": [ + { + "url": "https://server.example.com", + "maxFails": 10, + "maxConnections": 5, + "failTimeout": "5s", + "slowStart": "10s" + }, +``` + +In the above example, we can see how to create a single *Backend* with a single upstream server. 
+ +{{}} + +| Variable | Purpose | Required | Default | Context | +|----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------|----------| +| serviceName | provides a human-readable identifier to the Backend | true | none | Backend | +| serviceVersion | provides some version metadata should it be required | false | none | Backend | +| serviceLabel | provides a means to target this backend from this and other API Gateway deployments | true | default | Backend | +| contextRoot | sets the service root path for the upstream servers, i.e. /dev would mean that all requests proxied to /api/v1 would be proxied to /dev/api/v1 on the upstream servers. | false | / | Backend | +| upstreams | array of upstream servers, requires at least one server to be provided. | true | none | Backend | +| url | the URL of the upstream server, a port should be provided if using non-standard scheme -> port mappings, i.e. http:80, https:443 | true | none | Upstream | +| maxFails | sets the number of unsuccessful attempts to communicate with the server that should happen in the duration set by the `fail_timeout` parameter to consider the server unavailable for a duration also set by the `fail_timeout` parameter | false | 0 | Upstream | +| maxConnections | limits the maximum `_number_` of simultaneous active connections to the proxied server | false | 0 | Upstream | +| failTimeout | sets the time during which the specified number of unsuccessful attempts to communicate with the server should happen to consider the server unavailable and the period of time the server will be considered unavailable. 
| false | 10s | Upstream | +| slowStart | sets the `_time during which the server will recover its weight from zero to a nominal value, when an unhealthy server becomes healthy, or when the server becomes available after being unavailable. | false | none | Upstream | + +{{}} + +All values supplied in the OAS Specification are only modifiable through the OAS Specification and not through the API or UI, this means that the OAS Specification is the source of truth for all values supplied within it. +If values are omitted from the OAS Schema then they may be added or modified via the API or UI. + +### Proxy Basepath +It is possible to modify the basepath provided using two additional extensions: +`x-acm-append-rule` and `x-acm-strip-basepath`. + +`x-acm-append-rule` is a legacy configuration option that was used to either prepend or append the version field from the `info` section to your API basepath, going forward the basepath should be added explicitly to the global server URL section in exactly the manner in which it is to be used, for example: + +`x-acm-append-rule` defaults to `NONE` and the version field in the `info` section is only used as the document version metadata in favor of explicitly adding the version to the server URL. `x-acm-append-rule` should ONLY be used for legacy deployments that used a value other than `NONE` + +`x-acm-strip-basepath` is a boolean value that denotes whether to strip the basepath from the request URI before proxying the request to the backend servers. 
+ +{{}} + +| Incoming URI | basePath | stripBasepath | Context Root | Proxied URI | +|-----------------------|----------|---------------|--------------|------------------------| +| /api/v1/customers | /api/v1 | false | / | /api/v1/customers | +| /api/v1/customers | /api/v1 | true | / | /customers | +| /api/v1/customers/123 | /api/v1 | true | / | /customers/123 | +| /api/v1/customers | /api/v1 | false | /prod | /prod/api/v1/customers | +| /api/v1/customers | /api/v1 | true | /prod | /prod/customers | + +{{}} + +- When you upload an OpenAPI spec, API Connectivity Manager automatically generates a name for the API Docs object using the following format: + +`"info.title"-"info.version"` + +- The string is "URL-ized", meaning any whitespace gets converted to dashes (`-`) and all letters are lowercase. +If we used the OpenAPI example [Petstore API](https://github.com/OAI/OpenAPI-Specification/blob/main/tests/v3.0/pass/petstore.yaml), the auto-generated name for the API Docs would be `petstore-v1`. + +{{}} + +{{%tab name="UI"%}} + +1. Enter a name for the backend service. +1. Type the **Service Target Hostname**; this can be an IP or FQDN. +1. In the **Service Target Transport Protocol** menu, select gRPC if your backend service uses gRPC. +1. Enter the *Service Target Port*, or use the arrow buttons to increase or decrease the port number. + +### API Proxy + +1. Enter a name for the API Proxy. +1. Select Yes in the **Use an OpenAPI spec** option. +1. Select the **Add API Spec** button. +1. Select the **Browse** button and select a YAML or JSON file. +1. After the file uploads you can either select or search for your API spec. +1. Select **Publish**. + +### Ingress +Populated from API Specification and are read-only + +### Confirm Setup + +1. Open a terminal application. + +1. 
Run the following command: + + ```curl + curl -k -X GET "https://gateway-proxy-hostname/version/basepath" + ``` + +{{< note >}} By default the ingress append rule is set to `NONE` when using an OAS Schema so your request must match the `basepath` you have supplied as part of your Global Server URL. {{< /note >}} + +{{%/tab%}} +{{%tab name="API"%}} + + +{{}} + +| Method | Endpoint | +|-------------|----------| +| POST | `/services/workspaces/{{proxyWorkspaceName}}/api-docs`| +| POST | `/services/workspaces/{{proxyWorkspaceName}}/proxies`| + +{{}} + + +Take the steps below to add an API Proxy with an OpenAPI spec using either version 3.0.x or 3.1. + +1. Send a POST request containing the OpenAPI spec for your API to the `api-docs` endpoint to upload it. + + ```json + { + "info": { + "version": "1.0.0", + "title": "Swagger Petstore", + "license:" { + "name": "MIT" + } + }, + "openapi": "3.0.0", + "paths": {...} + } + ``` + +1. Send a POST request to the `proxies` endpoint to create a new API Proxy. In this example, `specRef` contains the name that API Connectivity Manager assigned to the API Docs object: `petstore-1`. + + ```json + { + "name": "{{proxyName}}", + "metadata": { + "description": "Swagger Petstore API" + }, + "version": "v1", + "specRef": "petstore-1", + "proxyConfig": { + "hostname": "{{environmentHostname}}", + "ingress": { + "basePath": "/v1" + }, + "backends": [ + { + "serviceName": "petstore-api-svc", + "serviceTargets": [ + { + "hostname": "petstore.example.com" + } + ] + } + ] + } + } + ``` + +{{%/tab%}} +{{}} + +## Publish an API Doc to Developer Portal +Next, you can publish API Docs to your Developer Portal. + +API Connectivity Manager uses the `portalConfig.hostname` setting to connect your Dev Portal Proxy to the Developer Portal. +You should define this field using the hostname that you assigned to the Developer Portal in the [Set Up a Developer Portal]({{< relref "add-devportal" >}}) guide. 
+ +{{}} + + +{{%tab name="UI"%}} + +Refer to [Publish API Proxy with OpenAPI Spec](#publish-api-proxy-with-spec). + +1. Select the **Also publish API to developer portal** option +1. Select the **Portal Proxy Hostname**. +1. (Optional) Enter a category if required. +1. Select **Publish** + +Open the Developer Portal and you should see the API doc is now displayed on the page. + +{{%/tab%}} +{{%tab name="API"%}} + + +{{}} + +| Method | Endpoint | +|-------------|----------| +| PUT | `/services/workspaces/{{proxyWorkspaceName}}/proxies/{{proxyName}}`| + +{{}} + + + +The example below adds the Developer Portal to the same API Proxy that you created in the [previous section](#set-up-api-proxy). + +```json +{ + "name": "{{proxyName}}", + "specRef": "petstore-1", + "version": "v1", + "proxyConfig": { + "hostname": "{{environmentHostname}}", + "ingress": { + "basePath": "/v1" + }, + "backends": [ + { + "serviceName": "petstore-api-svc", + "serviceTargets": [ + { + "hostname": "petstore.example.com" + } + ] + } + ] + }, + "portalConfig": { + "hostname": "{{portalClusterHostname}}" + } +} +``` + +{{%/tab%}} +{{}} diff --git a/content/nms/acm/how-to/_index.md b/content/nms/acm/how-to/_index.md new file mode 100644 index 000000000..e633dd278 --- /dev/null +++ b/content/nms/acm/how-to/_index.md @@ -0,0 +1,6 @@ +--- +description: "Task-oriented topics that focus on how to use F5 NGINX Management Suite API Connectivity Manager." 
+title: How-To Guides +weight: 500 +url: /nginx-management-suite/acm/how-to/ +--- \ No newline at end of file diff --git a/content/nms/acm/how-to/backup-recovery.md b/content/nms/acm/how-to/backup-recovery.md new file mode 100644 index 000000000..957e82830 --- /dev/null +++ b/content/nms/acm/how-to/backup-recovery.md @@ -0,0 +1,308 @@ +--- +title: "Back up and recovery" +toc: true +docs: "DOCS-1668" +--- + +## Overview + +F5 NGINX Management Suite includes several scripts for backing up and restoring the configuration files, secrets, and databases used by the platform. + +{{}}The backup and recovery scripts are provided for reference and may need to be changed for your deployment.{{}} + +--- + +## NGINX Management Suite and API Connectivity Manager deployed in a Virtual Machine or Bare Metal + +### Before you begin + +To complete the instructions in this guide, you need the following: + +- An installed version of Instance Manager +- An installed version of API Connectivity Manager +- Instance Manager versions older than 2.15.0 will require an installed version of SQLite. Refer to the [Install SQLite]({{< relref "/nim/admin-guide/maintenance/sqlite-installation.md" >}}) guide for installation instructions. +- The NGINX Management Suite services must be running: + + ```shell + sudo systemctl start nms + ``` + +### Make scripts executable + +To run the backup and restore scripts, you need to set their permissions to make them executable. + +1. Open a secure shell (SSH) connection to the NGINX Management Suite host and log in. +1. Change to the directory where the scripts are located: + + ```shell + cd /etc/nms/scripts + ``` + +1. Run the following commands to make the scripts executable: + + ```shell + sudo chmod +x backup.sh + sudo chmod +x restore.sh + sudo chmod +x backup-acm.sh + sudo chmod +x restore-acm.sh + sudo chmod +x support-package.sh + ``` + +### Include module data + +By default, the data for API Connectivity Manager isn't included in the backup. 
+ +To back up module data, follow these steps: + +1. Open a secure shell (SSH) connection to the NGINX Management Suite host and log in. +1. Change to the directory where the scripts are located: + + ```shell + cd /etc/nms/scripts + ``` +1. Edit the `backup.sh` and `restore.sh` scripts and uncomment the commands in the relevant sections: + + In **backup.sh**, uncomment the following lines: + + ```shell + ## Back up API Connectivity Manager + # Uncomment the following line to back up API Connectivity Manager. + ACM_ACTIVE=$(systemctl is-active --quiet nms-acm) + IS_ACM_ACTIVE=$? + if [ $IS_ACM_ACTIVE -ne 0 ]; then + echo "You need to start the required NGINX Management Suite + services before running the backup script." + echo "Please ensure the following nms service is running:" + echo "nms-acm" + exit 1 + fi + ``` + + ```shell + ## Back up API Connectivity Manager + # Uncomment the following line to back up API Connectivity Manager. + ./backup-acm.sh + ``` + + In **restore.sh**, uncomment the following lines: + + ```shell + ## Back up API Connectivity Manager + # Uncomment the following line to back up API Connectivity Manager. + ACM_ACTIVE=$(systemctl is-active --quiet nms-acm) + IS_ACM_ACTIVE=$? + if [ $IS_ACM_ACTIVE -eq 0 ]; then + echo "You need to stop the required NGINX Management Suite + services before running the restore script." + echo "Please ensure the following nms service is stopped:" + echo "nms-acm" + exit 1 + fi + ``` + + ```shell + ## Restore the API Connectivity Manager database. + # Uncomment the following line to restore API Connectivity Manager. + ./restore-acm.sh + ``` + +### Back up and restore NGINX Management Suite and API Connectivity Manager + +To back up the NGINX Management Suite configuration files, secrets, and databases: + +1. Open a secure shell (SSH) connection to the NGINX Management Suite host and log in. +1. 
To back up NGINX Management Suite, run the following commands: + + ```shell + cd /etc/nms/scripts + sudo ./backup.sh + ``` + + The backup is saved to a tarball file similar to the following example: `/tmp/nms-backup-.tgz` + +To restore NGINX Management Suite: + +1. Open a secure shell (SSH) connection to the NGINX Management Suite host and log in. +1. To restore NGINX Management Suite, run the following commands: + + ```shell + cd /etc/nms/scripts + sudo ./restore.sh /tmp/nms-backup-.tgz + ``` +--- + +## NGINX Management Suite and API Connectivity Manager deployed in a Kubernetes Cluster + +### Before you begin + +To complete the instructions in this guide, you need the following: + +- An installed version of NGINX Management Suite and Instance Manager +- An installed version of API Connectivity Manager +- Instance Manager versions older than 2.15.0 will require an installed version of SQLite. Refer to the [Install SQLite]({{< relref "/nim/admin-guide/maintenance/sqlite-installation.md" >}}) guide for installation instructions. + + + + +- Root Access + + To back up and restore the NGINX Management Suite on Kubernetes, run the scripts as a superuser with `sudo`. These scripts use the `kubectl` command to interact with the Kubernetes API. It is necessary to ensure the target Kubernetes cluster is accessible to the root user. + + To confirm that the root user has access to the Kubernetes API, run the following command: + + ```shell + sudo kubectl -n nms get pods + ``` + + If the result is error-free and the output is the list of currently running pods/nodes the root user has the required access. + + If the root user does not have the required access, you will need to configure the root user to have Kubernetes API access, or provide the script with the location of the Kubernetes configuration via the environment variable `KUBECONFIG`. 
For example:
+
+    ```shell
+    KUBECONFIG=/etc/kubernetes/admin.conf
+    ```
+
+  In the example above, `/etc/kubernetes/admin.conf` is the default configuration location of a Kubernetes cluster. If the configuration location is different for the target Kubernetes cluster, update the location accordingly.
+
+- Utility pod
+
+  To back up and restore NGINX Management Suite in a Kubernetes cluster, you need to install the `utility` pod in your Kubernetes cluster. For each module you want to back up and restore, you need to configure the `utility` pod accordingly:
+
+  1. Update your [Helm Deployment values.yaml file]({{< relref "/nim/deploy/kubernetes/deploy-using-helm.md#configure-chart" >}}), add the `utility: true` line under `global` to enable the utility pod, and the required sections under `nmsModules` to back up and restore API Connectivity Manager. Example below:
+
+     ```yaml
+     global:
+       utility: true
+       nmsModules:
+         nms-acm:
+           enabled: true
+           addClaimsToUtility: true
+     ```
+
+  1. [Upgrade your NGINX Management Suite deployment]({{< relref "/nim/deploy/kubernetes/deploy-using-helm#helm-upgrade-nim" >}}) to apply the changes.
+
+  1. Download the NGINX Management Suite Helm chart for your currently installed version of NGINX Management Suite:
+
+     ```shell
+     helm repo add nginx-stable https://helm.nginx.com/stable
+     helm repo update
+     helm pull nginx-stable/nms
+     tar zxvf nms-.tgz
+     ```
+
+### Back up NGINX Management Suite and API Connectivity Manager
+
+To back up NGINX Management Suite deployed in a Kubernetes cluster, follow these steps:
+
+1. Copy the backup script `k8s-backup.sh` extracted from `nms-.tgz` to your working directory:
+
+   ```shell
+   cp nms-/charts/nms-hybrid/backup-restore/k8s-backup.sh .
+   ```
+
+1. Make the scripts executable:
+
+   ```shell
+   chmod +x k8s-backup.sh
+   ```
+
+1. 
Run the backup script: + + ```shell + ./k8s-backup.sh + ``` + + {{< note >}}The backup script does not need the `utility` pod or `sudo` permissions to create a backup.{{< /note >}} + +1. The command will ask for the NGINX Management Suite namespace. The script will create a backup archive in the same directory called `k8s-backup-.tar.gz`. + +### Full restoration to the same Kubernetes Cluster + +To restore NGINX Management Suite and the installed modules deployed in the same Kubernetes cluster, follow these steps: + +1. Copy the restore script `k8s-restore.sh` extracted from `nms-.tgz` to your working directory: + + - For NGINX Management Suite and API Connectivity Manager, copy `k8s-restore.sh` from the `nms-/charts/nms-hybrid/backup-restore/` directory. + + ```shell + cp nms-/nms/charts/nms-hybrid/backup-restore/k8s-restore.sh . + ``` + +1. Make the scripts executable: + + ```shell + chmod +x k8s-restore.sh + ``` + +1. Copy your `k8s-backup-.tar.gz` file to the same directory as the `k8s-restore.sh` script. + +1. Run the restore script: + + ```shell + sudo KUBECONFIG=/etc/kubernetes/admin.conf ./k8s-restore.sh -i k8s-backup-.tar.gz -r + ``` + + In the command above, `/etc/kubernetes/admin.conf` is the default configuration location of a Kubernetes cluster. If the configuration location is different for the target Kubernetes cluster, update the command accordingly. + + + {{< note >}}The restore script [needs root access]({{< relref "/nms/acm/how-to/backup-recovery.md#root-access" >}}) to Kubernetes for the restore operation.{{< /note >}} + +1. The script will ask for the NGINX Management Suite namespace. Once the namespace has been provided, the script will use the specified backup archive. + + {{< note >}}The script will use the `utility` pod to access all the mounted volumes to restore database directories and core secrets; and `kubectl` to restore the Kubernetes configmaps and secrets. 
Before starting the restoration, the script will stop all service pods and start the `utility` pod. After finishing the restore, it will stop the `utility` pod and start all service pods.{{< /note >}} + + +### Data-only restoration to a different Kubernetes Cluster + +To restore NGINX Management Suite and the installed modules into a different Kubernetes cluster, follow these steps: + +1. Copy the restore script `k8s-restore.sh` extracted from `nms-.tgz` to your working directory: + + - For NGINX Management Suite and API Connectivity Manager, copy `k8s-restore.sh` from the `nms-/charts/nms-hybrid/backup-restore/` directory. + + ```shell + cp nms-/nms/charts/nms-hybrid/backup-restore/k8s-restore.sh . + ``` + +1. Make the scripts executable: + + ```shell + chmod +x k8s-restore.sh + ``` + +1. Copy your `k8s-backup-.tar.gz` file to the same directory as the `k8s-restore.sh` script. + +1. Run the restore script: + + ```shell + sudo KUBECONFIG=/etc/kubernetes/admin.conf ./k8s-restore.sh -i k8s-backup-.tar.gz -r -d + ``` + + In the command above, `/etc/kubernetes/admin.conf` is the default configuration location of a Kubernetes cluster. If the configuration location is different for the target Kubernetes cluster, update the command accordingly. + + + {{< note >}}The restore script [needs root access]({{< relref "/nms/acm/how-to/backup-recovery.md#root-access" >}}) to Kubernetes for the restore operation.{{< /note >}} + +1. The script will ask for the NGINX Management Suite namespace. Once the namespace has been provided, the script will use the specified backup archive. + +The restore script will only restore the databases and core secrets. If you want to restore the user passwords too, run the following commands on the extracted `k8s-backup-.tar.gz` file: + + ```shell + cd k8s-backup-/secrets + kubectl -n nms apply -f nms-auth.json + kubectl -n nms delete pod apigw- + ``` + +--- + +## ClickHouse + +ClickHouse supports backup and restore on versions greater than v22. 
+ +For instructions on how to back up and restore the ClickHouse database, please refer to [ClickHouse's documentation](https://clickhouse.com/docs/en/operations/backup). + +To check your ClickHouse version, run the following command: + +```shell +clickhouse-server --version +``` diff --git a/content/nms/acm/how-to/deploy-api-connectivity-manager.md b/content/nms/acm/how-to/deploy-api-connectivity-manager.md new file mode 100644 index 000000000..40e94b691 --- /dev/null +++ b/content/nms/acm/how-to/deploy-api-connectivity-manager.md @@ -0,0 +1,254 @@ +--- +description: The guide provides step-by-step instructions to deploy F5 NGINX API Connectivity + Manager on Kubernetes using a Helm chart. +docs: DOCS-1276 +doctypes: +- task +tags: +- docs +title: Deploy API Connectivity Manager on Kubernetes +toc: true +weight: 20 +--- + +## Requirements + +Review the following requirements for API Connectivity Manager before continuing. + +### Install Instance Manager + +{{< important >}}To install API Connectivity Manager, you must first install Instance Manager. This is because API Connectivity Manager relies on features that are included with Instance Manager.{{< /important >}} + +- [Deploy Instance Manager on Kubernetes]({{< relref "/nim/deploy/kubernetes/deploy-using-helm.md" >}}) + +### Dependencies with Instance Manager + +Refer to the following table to see the module compatibility for each F5 NGINX Management Suite chart. + +{{< include "nim/kubernetes/nms-chart-supported-module-versions.md" >}} + + +--- + +## Download Docker Image {#download-docker-image} + +Follow these steps to download the Docker image for API Connectivity Manager: + +1. Go to the [MyF5 website](https://my.f5.com/manage/s/downloads), then select **Resources > Downloads**. +1. In the **Select Product Family** list, select **NGINX**. +1. In the **Product Line** list, select **NGINX API Connectivity Manager**. +1. 
Select the following download options: + + - **Product version** -- Select the version of API Connectivity Manager you want to install. Make sure this version is compatible with the version of Instance Manager you installed as a prerequisite. Refer to the [Dependencies with Instance Manager](#dependencies-with-instance-manager) section above. + - **Linux distribution** -- Select the Linux distribution you're deploying to. For example, **ubuntu**. + - **Distribution Version** -- Select the Linux distribution's version. For example, **20.04**. + - **Architecture** -- Select the architecture. For example, **amd64**. + +1. In the **Download Files** section, download the `nms-acm--img.tar.gz` file. + +--- + +## Load Docker Image {#load-docker-image} + +{{< note >}} To complete the commands in this section, you need to have [Docker 20.10 or later](https://docs.docker.com/get-docker/) installed. {{< /note >}} + + +1. Change to the directory where you downloaded the Docker image: + + ``` shell + cd + ``` + +1. Load the Docker image from the `nms-acm--img.tar.gz` archive: + + ``` shell + docker load -i nms-acm--img.tar.gz + ``` + + The output looks similar to the following: + + ``` shell + $ docker load -i nms-acm--img.tar.gz + 1b5933fe4b5: Loading layer [==================================================>] 5.796MB/5.796MB + fbe0fc9bcf95: Loading layer [==================================================>] 17.86MB/17.86MB + ... + 112ae1f604e0: Loading layer [==================================================>] 67.8MB/67.8MB + 4b6a693b90f4: Loading layer [==================================================>] 3.072kB/3.072kB + Loaded image: nms-acm:1.5.0 + ``` + + {{}} + Take note of the loaded image's name and tag. You'll need to reference this information in the next section when pushing the image to your private registry. + + In the example output above, `nms-acm` is the image name and `1.5.0` is the tag. 
The image name or tag could be different depending on the product version you downloaded from MyF5.
+  {{}}
+
+---
+
+## Push Image to Private Registry {#push-docker-image}
+
+{{}}To complete the steps in this section, you need an [externally-accessible private Docker registry](https://docs.docker.com/registry/deploying/) to push the container images to.{{}}
+
+To push the Docker images to your private registry, take the following steps:
+
+- Replace `` with your private Docker registry and port (if needed).
+
+- Replace `` with the tag you noted when [loading the Docker image](#load-docker-image) above.
+
+1. Log in to your private registry:
+
+   ```shell
+   docker login 
+   ```
+
+2. Tag the image with the image name and version you noted when [loading the Docker image](#load-docker-image).
+
+   ```shell
+   docker tag nms-acm: /nms-acm:
+   ```
+
+   For example:
+
+   ```shell
+   docker tag nms-acm:1.5 myregistryhost:5000/nms-acm:1.5
+   ```
+
+3. Push the image to your private registry:
+
+   ```shell
+   docker push /nms-acm:
+   ```
+
+   For example:
+
+   ```shell
+   docker push myregistryhost:5000/nms-acm:1.5
+   ```
+
+---
+
+## Enable API Connectivity Manager
+
+To enable the API Connectivity Manager Module, take the following steps:
+
+1. Open the `values.yaml` file for editing.
+1. Add the following snippet to the `values.yaml` file:
+
+   - Replace `` with your private Docker registry and port (if needed).
+   - Replace `` with the tag you noted when [loading the Docker image](#load-docker-image) above.
+   - In the `imagePullSecrets` section, add the credentials for your private Docker registry.
+
+   ```yaml
+   # values.yaml
+   global:
+     nmsModules:
+       nms-acm:
+         enabled: true
+   nms-acm:
+     imagePullSecrets:
+       - name: regcred
+     acm:
+       image:
+         repository: /nms-acm
+         tag: 
+   ```
+
+1. Close and save the `values.yaml` file. 
+ +--- + +## Upgrade NGINX Management Suite Deployment {#upgrade-nms} + +{{< note >}} To complete the steps in this section, you need to have [OpenSSL 1.1.1](https://www.openssl.org/source/) or later installed. {{}} + +Run the following command to upgrade the NGINX instance deployment: + +- Replace `` with the path to the [values.yaml file you created]({{< relref "/nim/deploy/kubernetes/deploy-using-helm.md#configure-chart" >}}). +- Replace `YourPassword123#` with a secure password that contains a combination of uppercase and lowercase letters, numbers, and special characters. + + {{< important >}}Make sure to copy and save the password for future reference. Only the encrypted password is stored in Kubernetes. There's no way to recover or reset a lost password.{{< /important >}} + +- (Optional) Replace `` with the desired version; see the table below for the available versions. Alternatively, you can omit this flag to install the latest version. + +```bash +helm upgrade -n nms --set nms-hybrid.adminPasswordHash=$(openssl passwd -6 'YourPassword123#') nms nginx-stable/nms -f [--version ] --wait +``` + +This command upgrades an existing Helm chart deployment named `nms` with a new version of the chart located in the `nginx-stable/nms` repository. It also sets the value of the `nms-hybrid.adminPasswordHash` to the hashed version of the provided password and uses a `values.yaml` file located at the provided path. + +### Upgrade Existing API Connectivity Manager Deployment {#upgrade-acm-helm} + +If you've already deployed API Connectivity Manager and would like to upgrade to a newer version, take the following steps: + +1. Repeat the steps above to: + + - [Download Newer Docker Image](#download-docker-image) + - [Load Docker Image](#load-docker-image) + - [Push Image to Private Docker Registry](#push-docker-image) + +2. Run the `helm upgrade` command above to [upgrade the NGINX Management Suite deployment](#upgrade-nms). 
+ +--- + +## Access Web Interface + +{{< include "nim/kubernetes/access-webui-helm.md" >}} + +--- + +## Add License + +A valid license is required to make full use of all the features in API Connectivity Manager. + +Refer to the [Add a License]({{< relref "/nim/admin-guide/license/add-license.md" >}}) topic for instructions on how to download and apply a trial license, subscription license, or Flexible Consumption Program license. + +--- + +## Configurable Helm Settings + +The following table lists the configurable parameters and default values used by the API Connectivity Manager chart when installing from a Helm chart. + +To modify a configuration for an existing release, run the `helm upgrade` command and use `-f `, where `my-values-file` is a path to a values file with your desired configuration. + +{{}} + +| Parameter | Description | Default | +|:-----------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------| +| `nms-acm.acm.logLevel` | Set the log level for the backend API service. The log level can be `fatal`, `error`, `warning`, `info`, or `debug` | `info` | +| `nms-acm.acm.image.repository` | Repository name and path for the `acm` image. | `acm` | +| `nms-acm.acm.image.tag` | Tag used for pulling images from registry. | `latest` | +| `nms-acm.acm.image.pullPolicy` | Image pull policy. | `IfNotPresent` | +| `nms-acm.acm.container.port.http` | TCP port for the pod to listen on. | `8037` | +| `nms-acm.acm.container.port.db` | Port to use for Dqlite. | `9300` | +| `nms-acm.acm.metrics.enabled` | Enable metrics. | `false` | +| `nms-acm.acm.service.httpPort` | TCP port for the service to listen on. | `8037` | +| `nms-acm.acm.resources.requests.cpu` | CPU resource limits to allow for the `acm` pods. 
| `500m` | +| `nms-acm.acm.resources.requests.memory` | Memory resource limits to allow for the `api` pods. | `512Mi` | +| `nms-acm.acm.persistence.enabled` | Optionally disable persistent storage, used for database data. | `true` | +| `nms-acm.acm.persistence.claims` | An array of persistent volume claims, can be modified to use an existing PVC. | See the [Dqlite](#acm-dqlite-configuration) configuration section below. | +| `nms-acm.acm.devportal.credentials.enabled` | Enables the [Create Credentials Endpoint on the Developer Portal]({{< relref "/nms/acm/how-to/infrastructure/enable-create-credentials.md" >}}) | `false` | +| `nms-acm.acm.devportal.credentials.ssl` | This should be set to true if mTLS has been configured between API Connectivity Manager and the Developer Portal, for more information see [Create Credentials Endpoint on the Developer Portal]({{< relref "/nms/acm/how-to/infrastructure/enable-create-credentials.md" >}}) | `false` | +| `nms-acm.acm.devportal.client.caSecret.name` | This should be set if an unknown Certificate Authority is needed for communication with the Developer Portal in order to provide a CA certificate. This should be set to the name of the secret in the release namespace that contains the CA certificate. | Blank | +| `nms-acm.acm.devportal.client.caSecret.key` | This should be set if an unknown Certificate Authority is needed for communication with the Developer Portal in order to provide a CA certificate. This should be set to the key of the secret in the release namespace that contains the CA certificate. 
| Blank | + +{{}} + +##### API Connectivity Manager Dqlite Storage Configuration {#acm-dqlite-configuration} + +```yaml + - name: dqlite + existingClaim: + size: 500Mi + accessMode: ReadWriteOnce +``` + + +--- + +## Troubleshooting + + + +For guidance on how to create a support package containing system and service details to share with NGINX Customer Support, refer to the guide [Create a Support Package from a Helm Installation]({{< relref "/nms/support/k8s-support-package.md" >}}). + +--- diff --git a/content/nms/acm/how-to/devportals/_index.md b/content/nms/acm/how-to/devportals/_index.md new file mode 100644 index 000000000..3c4192c60 --- /dev/null +++ b/content/nms/acm/how-to/devportals/_index.md @@ -0,0 +1,5 @@ +--- +title: Developer Portals +weight: 300 +url: /nginx-management-suite/acm/how-to/devportals/ +--- \ No newline at end of file diff --git a/content/nms/acm/how-to/devportals/devportal-support-package.md b/content/nms/acm/how-to/devportals/devportal-support-package.md new file mode 100644 index 000000000..89a34edf4 --- /dev/null +++ b/content/nms/acm/how-to/devportals/devportal-support-package.md @@ -0,0 +1,106 @@ +--- +description: This guide explains how to generate a support package for troubleshooting + error scenarios. +docs: DOCS-1259 +doctypes: +- reference +- task +tags: +- docs +title: Create an NGINX Developer Portal Support Package +toc: true +weight: 300 +--- + +{{< shortversions "1.0.0" "latest" "acmvers" >}} +## Overview + +The support package script can be used to collect information about your system for troubleshooting and debugging issues. + +The script collects system and service information and then packages the data into a tar archive, which you can share with [NGINX Customer Support]({{< relref "/nms/support/contact-support.md" >}}). + +## Usage + +The NGINX Developer Portal installer copies the `support-package.sh` script to the following location: `/etc/nginx-devportal/scripts/support-package.sh`. 
+ +To create a support package: + +1. Run the support package script. The script requires root privileges to run. + + ``` bash + sudo bash /etc/nginx-devportal/scripts/support-package.sh + ``` + + The support package is saved in the same location from where the script was run. + + (Optional) If you use a different NGINX Developer Portal config file than the default `/etc/nginx-devportal/devportal.conf` file, run the support package script with the `-c` flag and specify the path to your config file: + + ```bash + sudo bash /etc/nginx-devportal/scripts/support-package.sh -c /your/config.conf + ``` + +2. To extract the package, use the `tar` command: + + ```bash + tar -xvf support-pkg-.tar.gz + ``` + +{{< note >}} +The supported shell is `bash`. +{{< /note >}} + +### Arguments + +The following table lists the arguments you can use with the support package script. + +{{}} + +| Short | Long | Description | Example | Default | +| ----- | ------------------------- | ------------------------------------------------------------------- | ---------------------------------------- | ------------------------------------- | +| `-h` | `--help` | Prints information about the script arguments to stdout. | `--help` | N/A | +| `-o` | `--output_dir` | The output directory where the tar archive is saved. | `-o ~/output` | `$(pwd)` | +| `-c` | `--devportal_config_path` | The path to the NGINX Developer Portal config file. | `-c /etc/nginx-devportal/devportal.conf` | `/etc/nginx-devportal/devportal.conf` | +| `-m` | `--devportal_log_path` | The directory where the NGINX Developer Portal log file is located. | `-m /var/log/nginx-devportal.log` | `/var/log/nginx-devportal.log` | + +{{}} + +## Package Contents + +The support package includes several directories containing information about the system, service, and database state. + +The information included is based on the F5 NGINX products installed and configured. + +### devportal-logs + +The logs of the NGINX Developer Portal process. 
+
+### service-information
+
+Information about the NGINX Developer Portal service running on the host. For the `nginx-devportal` process, the script collects:
+
+- `journalctl` (10000 most recent rows)
+- `systemctl status`
+
+### system-information
+
+The status and state information of the host running NGINX Developer Portal, including the following:
+
+- System metrics (memory usage, CPU usage, etc.)
+- File permissions of the Developer Portal
+- Firewall or SELinux state
+- Network interfaces
+- Network information (hostname, iptables)
+- Environment variables
+- Disk usage of select directories
+- Operating system version
+- Installed Developer Portal version
+
+### database snapshot
+
+The support package script uses the `-c` flag (or `--devportal_config_path`) to get the NGINX Developer Portal configuration. If the configuration file is not specified, the script uses the default value `/etc/nginx-devportal/devportal.conf`.
+
+As the NGINX Developer Portal supports both SQLite & PostgreSQL database types, the support package script will determine the database settings from the `devportal.conf` configuration file.
+
+{{< note >}}
+The NGINX Developer Portal support package script will try to utilize the relevant data backup tool for the database type used. For example, the `sqlite3` binary will be needed in your path to allow a SQLite data dump to occur. Similarly for PostgreSQL the `pg_dump` tool will be required. If the relevant data dump tool is not currently found in the system's `$PATH`, an error will be logged to the console. 
+{{< /note >}} diff --git a/content/nms/acm/how-to/devportals/installation/_index.md b/content/nms/acm/how-to/devportals/installation/_index.md new file mode 100644 index 000000000..3ae46efff --- /dev/null +++ b/content/nms/acm/how-to/devportals/installation/_index.md @@ -0,0 +1,4 @@ +--- +title: Install the Developer Portal +weight: 100 +--- \ No newline at end of file diff --git a/content/nms/acm/how-to/devportals/installation/configure-devportal-helm-options.md b/content/nms/acm/how-to/devportals/installation/configure-devportal-helm-options.md new file mode 100644 index 000000000..495fb9ff1 --- /dev/null +++ b/content/nms/acm/how-to/devportals/installation/configure-devportal-helm-options.md @@ -0,0 +1,40 @@ +--- +description: 'This guide lists and describes the parameters you can set when deploying + the Developer Portal from a Helm chart. ' +docs: DOCS-1171 +doctypes: +- task +tags: +- docs +title: Deployment Options for Developer Portal Helm +toc: true +weight: 25 +--- + +{{< shortversions "1.3.0" "latest" "acmvers" >}} + +## Default Developer Portal Helm Settings {#default-devportal-helm-settings} + +This topic lists the default values that are used when [installing the Developer Portal from a Helm chart]({{< relref "/nim/deploy/kubernetes/deploy-using-helm.md" >}}). You can change these values to meet your specific needs. 
+ +{{< include "installation/helm/acm/dev-portal-helm-configurations/configuration-options.md" >}} + +--- + +## Common Deployment Configurations {#common-deployment-configs} + +### Deploy Developer Portal with an SQLite database + +{{< include "installation/helm/acm/dev-portal-helm-configurations/configure-helm-devportal-sqlite.md" >}} + +### Deploy Developer Portal with an embedded PostgreSQL database + +{{< include "installation/helm/acm/dev-portal-helm-configurations/configure-devportal-helm-embedded-postgres.md" >}} + +### Deploy Developer Portal with an external PostgreSQL database + +{{< include "installation/helm/acm/dev-portal-helm-configurations/configure-devportal-helm-external-postgres.md" >}} + +### Deploy Developer Portal using TLS for the backend API service + +{{< include "installation/helm/acm/dev-portal-helm-configurations/configure-devportal-helm-api-mtls.md" >}} diff --git a/content/nms/acm/how-to/devportals/installation/devportal-helm-chart.md b/content/nms/acm/how-to/devportals/installation/devportal-helm-chart.md new file mode 100644 index 000000000..d61a3bcfd --- /dev/null +++ b/content/nms/acm/how-to/devportals/installation/devportal-helm-chart.md @@ -0,0 +1,456 @@ +--- +description: Follow the steps in the guide to deploy the API Connectivity Manager + Developer Portal to Kubernetes using a Helm chart. +docs: DOCS-1110 +doctypes: +- tutorial +tags: +- docs +title: Deploy the Developer Portal from a Helm chart +toc: true +weight: 20 +--- + +{{< shortversions "1.3.0" "latest" "acmvers" >}} + +## Overview + +Follow the steps in this section to install, upgrade, or uninstall the API Connectivity Manager Developer Portal on Kubernetes using Helm. 
+
+---
+
+## Before You Begin
+
+To complete the steps in this section, you need the following:
+
+- A working knowledge of Docker and how to build and extend containers
+- An installed, licensed, and running version of API Connectivity Manager
+- An installed version of Helm v3.10.0 or newer
+- An [externally-accessible private Docker registry](https://docs.docker.com/registry/deploying/) to push the container images to
+- Your F5 NGINX Plus certificate and key files, which you can download from [MyF5](https://my.f5.com/manage/s/)
+
+{{}}
+
+- Take a few minutes to review the [Configurable Helm Settings](#configuration-options) at the end of this topic. You can change these settings to customize your installation to meet your needs.
+
+- Check out the [Deployment Patterns for Developer Portal]({{< relref "/nms/acm/how-to/infrastructure/configure-devportal-backend.md" >}}) topic if you're considering installing the Developer Portal on a single host or on a cluster for high availability.
+
+{{}}
+
+---
+
+## Download the Developer Portal Container Images {#download-devportal-api-image}
+
+1. On the [MyF5 website](https://my.f5.com/manage/s/downloads), select **Resources > NGINX Downloads**.
+2. In the NGINX products list, select **NGINX API Connectivity Manager**.
+3. Select the following download options. Pick the version that you require; in this guide, we've chosen 1.3.0 as an example:
+
+   **Product version:** 1.3.0
+   **Linux distribution:** Ubuntu
+   **Distribution Version:** 20.04
+   **Architecture:** amd64
+
+4. Download the `nginx-devportal-api--img.tar.gz` file.
+5. Download the `nginx-devportal-apigw--img.tar.gz` file.
+   {{< note >}}
+   If you require a version of NGINX Plus other than what is provided, please see the optional section on [building the API Gateway Container Image](#build-apigw-docker-image).
+   {{< /note >}}
+
+---
+
+## Load Docker Images {#load-docker-image}
+
+1. 
Change to the directory where you downloaded the Docker images: + + ``` shell + cd + ``` + +2. Load the Docker image: + + ``` shell + docker load -i nginx-devportal-api--img.tar.gz + docker load -i nginx-devportal-apigw--img.tar.gz + ``` + + The output looks similar to the following: + + ``` shell + $ docker load -i nginx-devportal-api--img.tar.gz + f4373956a745: Loading layer [==================================================>] 2.171MB/2.171MB + 95de16926adc: Loading layer [==================================================>] 15.62MB/15.62MB + Loaded image: nginx-devportal-api:1.5.0 + $ docker load -i nginx-devportal-apigw--img.tar.gz + 0e2737d1d5b7: Loading layer [==================================================>] 1.097MB/1.097MB + 2b64694bf95f: Loading layer [==================================================>] 83.19MB/83.19MB + 1e8cac41ce82: Loading layer [==================================================>] 2.56kB/2.56kB + Loaded image: nginx-devportal-apigw:1.5.0-r28 + ``` + + {{}} + Take note of the loaded image's name and tag. You'll need to reference this information in the next section when pushing the image to your private registry. + + In the example output above, `nginx-devportal-api` is the image name and `1.5.0` is the tag for the first image. For the second image `nginx-devportal-apigw` is the image name and `1.5.0-r28` is the tag (where `1.5.0` is the release version and `r28` is the NGINX Plus version). The image names or tags could be different depending on the product version you downloaded from MyF5. + {{}} + +### (Optional) Build the API Gateway Container Image {#build-apigw-docker-image} + {{< note >}} + This is step is only required for versions of API Connectivity Manager Developer Portal prior to `1.5.0` or if you require a specific release of NGINX Plus that is not provided on MyF5. + {{< /note >}} +
    + Build the API Gateway Container Image + The Developer Portal Helm chart requires a container image that includes the NGINX Plus service and NGINX Agent in order to deploy the chart and have the API Gateway register with the API Connectivity Manager control plane. + +In this example, we use Ubuntu (focal), but other supported distributions can be used. + +
    + Supported Linux distributions + +{{< include "tech-specs/acm-dev-portal-supported-distros.md" >}} + +
    + +Create a Dockerfile similar to the following example: + +1. Create a Dockerfile similar to the following example: + +
    + Example Dockerfile + + {{< fa "download" >}} {{< link "/acm/containers/devportal/Dockerfile" "Download example Dockerfile" >}} + + ```Dockerfile + FROM ubuntu:focal + + # NGINX Plus release e.g 27 + ARG NGINX_PLUS_VERSION + + # DEVPORTAL release e.g 1.3.0 + ARG DEVPORTAL_UI_VERSION + + ARG CONTROL_PLANE_IP + + # Install NGINX Plus + RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + set -ex \ + && apt-get update \ + && apt-get upgrade -y \ + && apt-get install --no-install-recommends --no-install-suggests -y \ + curl \ + gnupg \ + ca-certificates \ + apt-transport-https \ + lsb-release \ + procps \ + && \ + NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \ + for server in \ + hkp://keyserver.ubuntu.com:80 \ + pgp.mit.edu; do \ + echo "Fetching GPG key $NGINX_GPGKEY from $server"; \ + gpg --keyserver "$server" \ + --recv-keys "$NGINX_GPGKEY" \ + && break; \ + done \ + # Configure APT repos + && gpg --export "$NGINX_GPGKEY" > /etc/apt/trusted.gpg.d/nginx.gpg \ + && printf "Acquire::https::pkgs.nginx.com::SslCert \"/etc/ssl/nginx/nginx-repo.crt\";\n" >> /etc/apt/apt.conf.d/90pkgs-nginx \ + && printf "Acquire::https::pkgs.nginx.com::SslKey \"/etc/ssl/nginx/nginx-repo.key\";\n" >> /etc/apt/apt.conf.d/90pkgs-nginx \ + && printf "deb https://pkgs.nginx.com/plus/$(lsb_release -is | tr '[:upper:]' '[:lower:]') $(lsb_release -cs) nginx-plus\n" > /etc/apt/sources.list.d/nginx-plus.list \ + && printf "deb https://pkgs.nginx.com/nms/$(lsb_release -is | tr '[:upper:]' '[:lower:]') $(lsb_release -cs) nginx-plus\n" > /etc/apt/sources.list.d/nms.list \ + && apt-get update \ + # Install NGINX Plus & agent\ + && apt-get install -y \ + nginx-plus=${NGINX_PLUS_VERSION}* \ + nginx-plus-module-njs=${NGINX_PLUS_VERSION}* \ + nginx-devportal-ui=${DEVPORTAL_UI_VERSION}* \ + && curl --insecure https://$CONTROL_PLANE_IP/install/nginx-agent | 
PACKAGE_HOST=${CONTROL_PLANE_IP} sh \ + # Forward request and error logs to docker log collector \ + && ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log \ + # Cleanup \ + && apt-get autoremove --purge -y \ + curl \ + gnupg \ + apt-transport-https \ + lsb-release \ + && rm -rf /root/.gnupg \ + && rm -rf /etc/apt/sources.list.d/nginx-plus.list /etc/apt/sources.list.d/nms.list /etc/apt/apt.conf.d/90pkgs-nginx \ + && rm -rf /var/lib/apt/lists/* + + COPY /entrypoint.sh / + + STOPSIGNAL SIGTERM + + CMD bash /entrypoint.sh + ``` + +
    + +
    + +2. Add an `entrypoint.sh` file similar to the following example to the same directory where you added the Dockerfile: + +
    + Example entrypoint.sh + + {{< fa "download" >}} {{< link "/acm/containers/devportal/entrypoint.sh" "Download example entrypoint.sh file" >}} + + ```bash + #!/bin/bash + + set -euxo pipefail + + handle_term() + { + echo "received TERM signal" + echo "stopping nginx-agent ..." + kill -TERM "${agent_pid}" 2>/dev/null + echo "stopping nginx ..." + kill -TERM "${nginx_pid}" 2>/dev/null + } + + trap 'handle_term' TERM + + if [ -z "${CONTROL_PLANE_IP}" ]; then + echo "ERROR CONTROL_PLANE_IP environment variable needs to be set." + exit 1 + fi + + if [ -z "${INSTANCE_GROUP}" ]; then + echo "ERROR INSTANCE_GROUP environment variable needs to be set." + exit 1 + fi + + # Launch nginx + echo "starting nginx ..." + nginx -g "daemon off;" & + + nginx_pid=$! + + # start nginx-agent, pass args + echo "starting nginx-agent ..." + nginx-agent --instance-group "${INSTANCE_GROUP}" --server-host "${CONTROL_PLANE_IP}" & + + agent_pid=$! + + wait_term() + { + wait ${agent_pid} + trap - TERM + kill -QUIT "${nginx_pid}" 2>/dev/null + echo "waiting for nginx to stop..." + wait ${nginx_pid} + } + + wait_term + + echo "nginx-agent process has stopped, exiting." + ``` + +
    + +
    + +3. Add your NGINX Plus certificate and key files to the same directory as the Dockerfile. You can download these files from the [MyF5](https://my.f5.com/manage/s/) site. + +4. Build the Dockerfile and specify the following settings: + + - `CONTROL_PLANE_IP`: The IP address or hostname of your API Connectivity Manager control plane host + - `NGINX_PLUS_VERSION`: The version of NGINX Plus that you want to use; for example, `28` + - `DEVPORTAL_UI_VERSION`: The version of the Developer Portal UI that you want to use; for example, `1.5.0` + + ```bash + export CONTROL_PLANE_IP= + export NGINX_PLUS_VERSION= + export DEVPORTAL_UI_VERSION= + export DEVPORTAL_UI_TAG=${DEVPORTAL_UI_VERSION}-r${NGINX_PLUS_VERSION} + export DOCKER_BUILDKIT=1 + docker build \ + -t nginx-devportal-apigw:$DEVPORTAL_UI_TAG \ + --build-arg CONTROL_PLANE_IP \ + --build-arg NGINX_PLUS_VERSION \ + --build-arg DEVPORTAL_UI_VERSION \ + --secret id=nginx-crt,src=nginx-repo.crt \ + --secret id=nginx-key,src=nginx-repo.key \ + . + ``` + +
    + +--- + +## Push Images to Private Registry {#push-images-private-registry} + +{{}}To complete this step, you need an [externally-accessible private Docker registry](https://docs.docker.com/registry/deploying/) to push the container images to.{{}} + +After building or loading the Docker images, you can now tag and push the images to your private Docker registry. Replace `` in the examples below with the path to your private Docker registry. + +1. Log in to your private registry: + + ```shell + docker login + ``` + +2. Tag the images with the values you noted when completing the [Load Docker Images](#load-docker-image) steps above. + + ```shell + docker tag nginx-devportal-apigw: /nginx-devportal-apigw: + docker tag nginx-devportal-api: /nginx-devportal-api: + ``` + +3. Push the images to your private registry: + + ```shell + docker push /nginx-devportal-apigw: + docker push /nginx-devportal-api: + ``` + +--- + +## Add Helm Repository {#add-helm-repository} + +Run the following commands to install the NGINX Management Suite chart from the Helm repository: + +```shell +helm repo add nginx-stable https://helm.nginx.com/stable +helm repo update +``` + +The first command, `helm repo add nginx-stable https://helm.nginx.com/stable`, adds the `nginx-stable` repository to your local Helm repository list. This repository contains the Helm charts for deploying NGINX Management Suite. + +The second command, `helm repo update`, updates the local Helm repository list with the newest versions of the charts from the `nginx-stable` repository. This command ensures you have the most up-to-date version of the charts available for installation. + +--- + +## Configure Chart to Pull from Private Docker Registry {#configure-chart} + +A Helm `values.yaml` file is a configuration file you can use to customize the installation of a Helm chart without actually editing the chart itself, allowing for faster and more efficient deployments. 
Values can be used to specify different image repositories and tags, set environment variables, configure resource requests and limits, and more. + +1. Create a `values.yaml` file similar to the following example. This file is used to customize the configuration of the NGINX Developer Portal chart located in the `nginx-stable` Helm repository that you [added above](#add-helm-repository). + + ```yaml + # values.yaml + imagePullSecrets: + - name: regcred + apigw: + acmService: + enabled: true + type: LoadBalancer + image: + repository: /nginx-devportal-apigw + tag: + controlPlane: + host: + instanceGroup: + service: + type: LoadBalancer + api: + image: + repository: /nginx-devportal-api + tag: + db: + type: + acm: + client: + caSecret: + name: acm-tls + key: ca.crt + + ``` + + - Replace `` with your private Docker registry. + - Replace `` with the tag you used when [pushing the images to your private registry](#push-images-private-registry). + - In the `imagePullSecrets` section, add the credentials for your private Docker registry. + + {{}}The contents of `api.acm.client.caSecret.key` can be obtained from the `/etc/nms/certs/apigw/ca.pem` on the control plane.{{}} + + This `values.yaml` file specifies the Docker images to be used for the NGINX Developer Portal `apigw` and `api` components, including the repository (``) and tag (`version`) of each image. It also specifies that a secret called `regcred` should be used for image pulls. + + {{}}For instructions on creating a secret, see the Kubernetes topic [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/).{{}} + +2. Save and close the `values.yaml` file. + +## Install the Chart + +The Developer Portal does not require (although it is recommended) a dedicated namespace for the data plane. You can create this namespace yourself, or you can allow Helm to create it for you by using the `--create-namespace` flag when installing. 
+ +{{< note >}} +If persistent storage is not configured in your cluster, set the `apigw.persistence.enabled` and `api.persistence.enabled` values to `false` either in the values file or using the `--set` helm commands. +{{< /note >}} + +To install the chart with the release name `devportal` and namespace `devportal`, run the following command: + +```bash +helm install devportal nginx-stable/nginx-devportal --namespace devportal --create-namespace -f [--version ] --wait +``` + +--- + +## Upgrade the Chart {#upgrade-the-chart} + +You can upgrade to the latest Helm chart from the version immediately before it. For example, you can upgrade from v1.3.0 to v1.3.1. + +### Upgrade the Release + +To upgrade the release `devportal` in the `devportal` namespace, run the following command: + +```bash +helm upgrade devportal nginx-stable/nginx-devportal --namespace devportal -f [--version ] --wait +``` + +### Change Configuration Options + +You can use the `helm upgrade` command to change or apply additional configurations to the release. + +To change a configuration, use `--set` commands or `-f `, where `my-values-file` is a path to a values file with your desired configuration. + +--- + +## Uninstall the Chart + +To uninstall and delete the release `devportal` in the `devportal` namespace, take the following step: + +```bash +helm uninstall devportal --namespace devportal +``` + +This command removes all of the Kubernetes components associated with the Developer Portal release. The namespace is not deleted. + +--- + +## Configurable Helm Settings {#configuration-options} + +{{< include "installation/helm/acm/dev-portal-helm-configurations/configuration-options.md" >}} + +
    + +## Common Deployment Configurations + +Select from the following options to view some of the commonly used configurations for the Developer Portal. To apply these configurations, edit the `values.yaml` file as needed. + +### Deploy Developer Portal with an SQLite database + +{{< note >}} +This configuration is recommended for proof of concept installations and not for production deployments. +{{}} + +{{< include "installation/helm/acm/dev-portal-helm-configurations/configure-helm-devportal-sqlite.md" >}} + +### Deploy Developer Portal with an embedded PostgreSQL database + +{{< note >}} +This configuration is recommended for proof of concept installations and not for production deployments. +{{}} + +{{< include "installation/helm/acm/dev-portal-helm-configurations/configure-devportal-helm-embedded-postgres.md" >}} + +### Deploy Developer Portal with an external PostgreSQL database + +{{< include "installation/helm/acm/dev-portal-helm-configurations/configure-devportal-helm-external-postgres.md" >}} + +### Deploy Developer Portal using TLS for the backend API service + +{{< include "installation/helm/acm/dev-portal-helm-configurations/configure-devportal-helm-api-mtls.md" >}} + diff --git a/content/nms/acm/how-to/devportals/installation/install-dev-portal.md b/content/nms/acm/how-to/devportals/installation/install-dev-portal.md new file mode 100644 index 000000000..ef47cc3de --- /dev/null +++ b/content/nms/acm/how-to/devportals/installation/install-dev-portal.md @@ -0,0 +1,266 @@ +--- +description: Follow the steps in this guide to install or upgrade the Developer Portal + for F5 NGINX Management Suite API Connectivity Manager. +docs: DOCS-1214 +doctypes: +- tutorial +tags: +- docs +title: Install or Upgrade the Developer Portal +toc: true +weight: 10 +--- + +--- + +## Platform Requirements {#acm-devportal-requirements} + +{{}}To run the Developer Portal, you need a **dedicated** Linux host specifically for this purpose. 
**Do not** install the Developer Portal on a host that is currently serving as a management or data plane.{{}} + +Complete the following steps to prepare the Developer Portal for use with API Connectivity Manager: + +1. [Install F5 NGINX Plus R24 or later](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-plus/) +2. [Install NGINX njs module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/nginscript/) + +
    + Supported Linux distributions + +{{< include "tech-specs/acm-dev-portal-supported-distros.md" >}} + +
    + +
    + +--- + +## Prerequisites + +### Add NGINX Management Suite Repository {#add-yum-apt} + +{{< include "installation/add-nms-repo.md" >}} + +### Install PostgreSQL or SQLite + +The Developer Portal requires a PostgreSQL or SQLite database to store configuration settings and analytics information. + +Select the tab for the database you want to use, then follow the installation instructions. + +{{}} + +{{%tab name="PostgreSQL"%}} + +To use PostgreSQL for the Developer Portal database, take the following steps: + +1. Install PostgreSQL: + + - CentOS, RHEL, RPM-based: + + ```bash + sudo yum install -y postgresql-server + sudo postgresql-setup initdb + ``` + + - Debian, Ubuntu, Deb-based: + + ```bash + sudo apt-get install -y postgresql + ``` + +2. Configure the PostgreSQL host-based authentication (HBA) file: + + - CentOS, RHEL, RPM-based: + + ``` bash + cat << EOF | sudo tee /var/lib/pgsql/data/pg_hba.conf + + # TYPE DATABASE USER ADDRESS METHOD + + local all postgres peer + local all all md5 + # IPv4 local connections: + host all all 127.0.0.1/32 md5 + # IPv6 local connections: + host all all ::1/128 md5 + EOF + ``` + + - Debian, Ubuntu, Deb-based: + + ``` bash + cat << EOF | sudo tee /etc/postgresql//main/pg_hba.conf + + # TYPE DATABASE USER ADDRESS METHOD + + local all postgres peer + local all all md5 + # IPv4 local connections: + host all all 127.0.0.1/32 md5 + # IPv6 local connections: + host all all ::1/128 md5 + EOF + ``` + +3. Restart PostgreSQL: + + ``` bash + sudo systemctl restart postgresql + ``` + +4. 
Create the `devportal` database, add the `nginxdm` user, and assign privileges: + + ```bash + sudo -u postgres createdb devportal + sudo -u postgres psql -c "CREATE USER nginxdm WITH LOGIN PASSWORD 'nginxdm';" + sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE devportal TO nginxdm;" + ``` + +{{%/tab%}} + +{{%tab name="SQLite"%}} + +To use SQLite for the Developer Portal database, run the following commands: + +```bash +echo 'DB_TYPE="sqlite"' | sudo tee -a /etc/nginx-devportal/devportal.conf +echo 'DB_PATH="/var/lib/nginx-devportal"' | sudo tee -a /etc/nginx-devportal/devportal.conf +``` + +
    + +{{%/tab%}} + +{{
    }} + +--- + +## Install the Developer Portal + +{{}} +{{%tab name="CentOS, RHEL, RPM-Based"%}} + +1. To install the Developer Portal, run the following command: + + ```bash + sudo yum -y install nginx-devportal nginx-devportal-ui + ``` + +{{%/tab%}} + +{{%tab name="Debian, Ubuntu, Deb-Based"%}} + +1. To install the Developer Portal, run the following commands: + + ```bash + sudo apt-get update + sudo apt-get -y install nginx-devportal nginx-devportal-ui + ``` + +{{%/tab%}} +{{}} + +2. Enable the Developer Portal service: + + ```bash + sudo systemctl enable nginx-devportal.service + ``` + +3. Start the Developer Portal service: + + ```bash + sudo systemctl start nginx-devportal.service + ``` + +--- + +## Upgrade the Developer Portal + +{{}} +{{%tab name="CentOS, RHEL, RPM-Based"%}} + +1. To install the latest version of the Developer Portal, run the following command: + + ```bash + sudo yum update -y nginx-devportal nginx-devportal-ui + ``` + +{{%/tab%}} + +{{%tab name="Debian, Ubuntu, Deb-Based"%}} + +1. To install the latest version of the Developer Portal, run the following commands: + + ```bash + sudo apt-get update + sudo apt-get upgrade -y nginx-devportal nginx-devportal-ui + ``` + +{{%/tab%}} +{{}} + +2. Enable the Developer Portal service: + + ```bash + sudo systemctl enable nginx-devportal.service + ``` + +3. Restart the Developer Portal service: + + ```bash + sudo systemctl restart nginx-devportal.service + ``` + +--- + +## Secure Developer Portal API communication + +Depending on your [deployment pattern for the Developer Portal]({{< relref "/nms/acm/how-to/infrastructure/configure-devportal-backend.md" >}}), you may have either a single host installation (default) or a multi-host installation for high availability. We recommend using mTLS for the communication between the NGINX reverse proxy and the Developer Portal APIs to provide maximum security. + +1. 
On the Developer Portal Service host or hosts, edit the Dev Portal configuration file located at `/etc/nginx-devportal/devportal.conf`. +1. Add the location of the server certificate and certificate key, as shown in the example below. + + ```yaml + CERT_FILE="/path/to/devportal-server.crt" + KEY_FILE="/path/to/devportal-server.key" + INSECURE_MODE=false + CA_FILE="/path/to/ca.pem" # If using mTLS + CLIENT_VERIFY=true # If using mTLS + ``` + +1. Adjust the permissions of each of the certificate and key files provided to ensure they are readable by the Dev Portal backend service. +1. Restart the Developer Portal backend service: + + ```shell + sudo systemctl restart nginx-devportal + ``` + +1. If mTLS is configured on your Developer Portal service, you must add a TLS Backend Policy to both: + - The Developer Portal Cluster (Used for communication from users to the Developer Portal API) + - The Developer Portal Internal Cluster (For communication from the API Connectivity Manager to your Developer Portal API to publish and maintain information) +{{< note >}} +To add a TLS Backend Policy to both clusters, follow the [TLS Policies]({{< relref "/nms/acm/how-to/policies/tls-policies.md#add-tls-listener" >}}) documentation. +{{< /note >}} + +--- + +## Secure communication from the Developer Portal to NGINX Management Suite host with mTLS + +For complete Developer Portal functionality, such as the ability to create credentials from the Developer Portal, mTLS must be added for server-to-server communication. + +Follow the steps below to make sure the NGINX Management Suite host can verify the client certificates provided by the Developer Portal's backend service. + +1. Edit the NGINX Management Suite configuration file located at `/etc/nginx/conf.d/nms-http.conf`. +1. 
Add the location of the CA PEM file to the `ssl_client_certificate` directive, as shown in the example below: + + ```yaml + ssl_certificate /etc/nms/certs/manager-server.pem; + ssl_certificate_key /etc/nms/certs/manager-server.key; + ssl_client_certificate /etc/nms/certs/ca.pem; + ``` + +1. Reload the NGINX configuration: + + ```shell + sudo nginx -s reload + ``` + +1. Follow the steps in the [TLS Policies]({{< relref "/nms/acm/how-to/policies/tls-policies.md#/#tls-internal-cluster" >}}) documentation to add TLS policies that will enforce mTLS using these the correct client keys to connect to the NGINX Management Suite host. diff --git a/content/nms/acm/how-to/devportals/installation/install-devportal-offline.md b/content/nms/acm/how-to/devportals/installation/install-devportal-offline.md new file mode 100644 index 000000000..94200275e --- /dev/null +++ b/content/nms/acm/how-to/devportals/installation/install-devportal-offline.md @@ -0,0 +1,177 @@ +--- +description: Complete the steps in this guide to install the Developer Portal directly + from package files in environments without Internet access. +docs: DOCS-1215 +doctypes: +- tutorial +tags: +- docs +title: Install the Developer Portal in an Offline Environment +toc: true +weight: 30 +--- + +{{< dev-portal-dedicated-host >}} + + +## Prerequisites + +The Developer Portal requires [PostgreSQL](https://www.postgresql.org), [F5 NGINX Plus R24](https://docs.nginx.com/nginx/) or later, and [njs](https://nginx.org/en/docs/njs/). + +### PostgreSQL + +You can install the PostgreSQL package from your distribution’s repo at the same time you install the operating system. Refer to the the [PostgreSQL download guide](https://www.postgresql.org/download/) for instructions. + +### NGINX Plus and njs + +To install NGINX Plus and njs, take the following steps on the Developer Portal host: + +1. Log in to MyF5 and download your `nginx-repo.crt` and `nginx-repo.key` files. +2. 
Copy the `nginx-repo.crt` and `nginx-repo.key` files to the `/etc/ssl/nginx/` directory: + + ```bash + sudo cp nginx-repo.crt /etc/ssl/nginx/ + sudo cp nginx-repo.key /etc/ssl/nginx/ + ``` + +3. Select the following link to download the `fetch-external-acm-dataplane-dependencies.sh` script. This script downloads the necessary NGINX Plus and njs packages to a `tar.gz` archive. + + {{}} {{}} + +4. To download the NGINX Plus and njs dependencies, run the `fetch-external-acm-dataplane-dependencies.sh` script. As parameters, specify your Linux distribution and the location of your `nginx-repo.crt` and `nginx-repo.key` files. + + ```bash + sudo bash fetch-external-acm-dataplane-dependencies.sh /etc/ssl/nginx/nginx-repo.crt /etc/ssl/nginx/nginx-repo.key + ``` + + Supported Linux distributions: + + - `ubuntu18.04` + - `ubuntu20.04` + - `ubuntu22.04` + - `debian10` + - `debian11` + - `centos7` + - `rhel7` + - `rhel8` + - `rhel9` + - `amzn2` + + For example, to download external dependencies for Ubuntu 20.04: + + ```bash + sudo bash fetch-external-acm-dataplane-dependencies.sh ubuntu20.04 /etc/ssl/nginx/nginx-repo.crt /etc/ssl/nginx/nginx-repo.key + ``` + + In this example, the script creates an archive called `acm-dataplane-dependencies-ubuntu20.04.tar.gz` with the external dependencies. + +5. After you copy and extract the bundle onto your target machine, take the following steps to install the packages: + + {{< note >}}The bundled NGINX Plus package may conflict with installed versions of NGINX Plus. 
Delete the package from the bundle if you want to keep the existing version.{{< /note >}} + + {{}} + {{%tab name="CentOS, RHEL, and RPM-Based"%}} + +```bash +tar -kzxvf acm-dataplane-dependencies-.tar.gz +sudo yum localinstall *.rpm +``` + + {{%/tab%}} + {{%tab name="Debian, Ubuntu, and Deb-Based"%}} + +```bash +tar -kzxvf acm-dataplane-dependencies-.tar.gz +sudo dpkg -i ./*.deb +``` + +{{%/tab%}} +{{}} + +--- + +## Install the Developer Portal + +{{}} +{{%tab name="CentOS, RHEL, and RPM-Based"%}} + +1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the NGINX Developer Portal package files. + +2. Install the NGINX Developer Portal packages: + + ```bash + sudo yum -y --nogpgcheck install /home/user/nginx-devportal-.x86_64.rpm + sudo yum -y --nogpgcheck install /home/user/nginx-devportal-ui-.x86_64.rpm + ``` + +{{%/tab%}} +{{%tab name="Debian, Ubuntu, and Deb-Based"%}} + +1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the NGINX Developer Portal package files. + +2. Install the NGINX Developer Portal package: + + ```bash + sudo apt-get -y install -f /home/user/nginx-devportal__amd64.deb + sudo apt-get -y install -f /home/user/nginx-devportal-ui__amd64.deb + ``` + +{{%/tab%}} +{{}} + +3. Enable the Developer Portal service: + + ```bash + sudo systemctl enable nginx-devportal.service + ``` + +4. Start the Developer Portal service: + + ```bash + sudo systemctl restart nginx-devportal.service + ``` + +--- + +## Upgrade the Developer Portal + +To upgrade the Developer Portal in an offline environment, take the following steps: + +{{}} +{{%tab name="CentOS, RHEL, and RPM-Based"%}} + +1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the Developer Portal package files. + +2. 
Upgrade the Developer Portal packages: + + ```bash + sudo yum -y --nogpgcheck update /home/user/nginx-devportal_.x86_64.rpm + sudo yum -y --nogpgcheck update /home/user/nginx-devportal-ui_.x86_64.rpm + ``` + +{{%/tab%}} +{{%tab name="Debian, Ubuntu, and Deb-Based"%}} + +1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the Developer Portal package files. + +2. Upgrade the Developer Portal packages: + + ```bash + sudo apt-get -y install -f /home/user/nginx-devportal__amd64.deb + sudo apt-get -y install -f /home/user/nginx-devportal-ui__amd64.deb + ``` + +{{%/tab%}} +{{}} + +3. Enable the following Developer Portal service: + + ```bash + sudo systemctl enable nginx-devportal.service + ``` + +4. Restart the Developer Portal service: + + ```bash + sudo systemctl restart nginx-devportal.service + ``` diff --git a/content/nms/acm/how-to/devportals/publish-to-devportal.md b/content/nms/acm/how-to/devportals/publish-to-devportal.md new file mode 100644 index 000000000..5ddecde84 --- /dev/null +++ b/content/nms/acm/how-to/devportals/publish-to-devportal.md @@ -0,0 +1,78 @@ +--- +description: This document provides instructions on how to publish API documentation + and API proxies to a Developer Portal in order to make them available at a designated + hostname. +docs: DOCS-1082 +doctypes: +- task +tags: +- docs +title: Publish Docs to a Developer Portal +toc: true +weight: 200 +--- + +{{< shortversions "1.1.0" "latest" "acmvers" >}} + +--- + +## Overview + +This document will guide you through the process of publishing API documentation and API proxies to a Developer Portal. You will find instructions on how to add an API spec file, publish API documentation and the associated API proxy, or publish API documentation only. After completing these steps, you should be able to access your API and documentation at the designated hostname. 
+ +--- + +## Before You Begin + +To complete the steps in this guide, you need the following: + +- [API Connectivity Manager is installed]({{< relref "/nim/deploy/_index.md" >}}) and running +- One or more environments with a [configured Developer Portal]({{< relref "/nms/acm/getting-started/add-devportal.md" >}}) +- (Optional) [Customize the Developer Portal]({{< relref "/nms/acm/how-to/infrastructure/customize-devportal.md" >}}) + +--- + +## How to Access the User Interface + +{{< include "acm/how-to/access-acm-ui" >}} + +--- + +## Add an API Doc + +1. On the sidebar, select **Services**. +1. Select your workspace. +1. Select **API Docs > Add API Doc**. +1. Browse your local filesystem and select the API Spec in YAML or JSON format that you'd like to upload. +1. Select **Save**. + +## Publish the API Documentation and API Proxy + +1. Select **Services** on the sidebar. +1. Select your workspace from the **Workspaces** list. +1. On the **API Proxies** section, select **Publish to Proxy**. +1. On the **Name** box, type the name for the backend service. `-svc` will be added to the name automatically. +1. On the **Service Target Hostname**, type the hostname for the Service Target. +1. On the **API Proxy** section, the **Name** box is automatically filled. +1. On the **API Spec** section, select your spec using the list. +1. Select your **Gateway Proxy Hostname** using the list. +1. Confirm the **Base Path** and **Version** on the **Ingress** section. Update the default values if needed. +1. Check the **Also publish API to developer portal** box on the **Developer Portal** section. +1. Select the **Portal Proxy Hostname** using the list. +1. Select **Publish** + +The API and documentation should now be available at the hostname provided for the Developer Portal proxy. + +## Publish the API Documentation Only + +Take the steps below to publish just the API documentation. + +1. Select **Services** on the sidebar. +1. 
Select **Publish API Doc** from the **Actions** menu. +1. In the **Name** box, type the name for your API Doc. +1. On the **API Spec** section, select your spec using the list. +1. Select the **Portal Proxy Hostname** using the list. +1. Confirm the **Base Path** and **Version** on the **Ingress** section. Update the default values if needed. +1. Select the **Enter an external Hostname** option. +1. On the **External Hostname** section, provide the hostname for your external Target Proxy. +1. Select **Save**. diff --git a/content/nms/acm/how-to/infrastructure/_index.md b/content/nms/acm/how-to/infrastructure/_index.md new file mode 100644 index 000000000..f1345f39b --- /dev/null +++ b/content/nms/acm/how-to/infrastructure/_index.md @@ -0,0 +1,5 @@ +--- +title: Infrastructure +weight: 100 +url: /nginx-management-suite/acm/how-to/infrastructure/ +--- \ No newline at end of file diff --git a/content/nms/acm/how-to/infrastructure/configure-devportal-backend.md b/content/nms/acm/how-to/infrastructure/configure-devportal-backend.md new file mode 100644 index 000000000..ad7ab0b4d --- /dev/null +++ b/content/nms/acm/how-to/infrastructure/configure-devportal-backend.md @@ -0,0 +1,64 @@ +--- +description: Learn how to support various deployment patterns for Developer Portal. +docs: DOCS-955 +tags: +- docs +title: Deployment Patterns for Developer Portal +toc: true +weight: 200 +--- + +{{< shortversions "1.2.0" "latest" "acmvers" >}} + +## Overview + +The Developer Portal application is a combination of a portal application (Developer Portal UI) and a backend API service (Developer Portal API service) to support the application. + +The following deployment patterns are supported: + +- Developer Portal UI and API service deployed on a single host (default). +- Load-balanced backend API using multiple IP addresses. Developer Portal UI and API service deployed on different hosts. +- Load-balanced backend API using a single hostname. 
Developer Portal UI and API service deployed on different hosts using a single hostname and fronted by a load balancer. + +--- + +## Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- API Connectivity Manager is installed, licensed, and running. +- You have one or more Environments with a [Developer Portal]({{< relref "/nms/acm/getting-started/add-devportal" >}}) cluster. +- You have verified that you can access the Developer Portal using the configured hostname. + +### How to Access the User Interface + +{{< include "acm/how-to/access-acm-ui" >}} + +--- + +## Single Host Installation + +With the localhost installation, which is the default setup, both the backend and UI Developer Portal binaries are installed on the same machine. The backend API service is on the localhost, port 8080, by default. + +
    +{{< img src="acm/deployment-patterns/LocalInstall.png" alt="local install" width="400" >}} +
    + +--- + +## Multi-Host Installation for High Availability + +The Developer Portal backend API service can be scaled for high availability by installing the backend binaries on multiple hosts. The Developer Portal front-end load balances the requests between multiple backend services using an IP address or an internal DNS name. + +
    +{{< img src="acm/deployment-patterns/MultipleIP.png" alt="multiple IPs" width="400" >}} +
    + +### Configure Developer Portal Backend + +When creating a Developer Portal in an environment, you can set multiple `serviceTargets` to match any of the deployment patterns above. + +1. In the API Connectivity Manager user interface, go select **Workspaces > Environments > \**, where "your environment" is the Environment that contains the Developer Portal. +1. Select **Edit Advanced Config** from the **Actions** menu for the desired Developer Portal. +1. On the **Backend** tab, select the default backend service, then select **Edit Backend** from the **Actions** menu. +1. Add/Update desired service target. diff --git a/content/nms/acm/how-to/infrastructure/customize-devportal.md b/content/nms/acm/how-to/infrastructure/customize-devportal.md new file mode 100644 index 000000000..20701f0b6 --- /dev/null +++ b/content/nms/acm/how-to/infrastructure/customize-devportal.md @@ -0,0 +1,210 @@ +--- +description: Learn how to customize a Developer Portal and publish documentation using + F5 NGINX Management Suite API Connectivity Manager. +docs: DOCS-900 +tags: +- docs +title: Customize a Developer Portal +toc: true +weight: 300 +--- + +{{< shortversions "1.1.0" "latest" "acmvers" >}} + +## Overview + +You can use API Connectivity Manager to create and manage Developer Portals (or, "Dev Portals") to host your APIs and documentation. API Connectivity Manager Dev Portals provide a framework for customization that lets you match your Dev Portal to your brand's or business' requirements. +You can customize the Dev Portal website's landing page, All APIs page, and Docs page(s), as well as the site's header and footer. + +### Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- API Connectivity Manager is installed, licensed, and running. +- You have one or more Environments with a [Developer Portal]({{< relref "/nms/acm/getting-started/add-devportal" >}}) cluster. 
+- You have verified that you can access the Developer Portal using the configured hostname. + +### How to Access the User Interface + +{{< include "acm/how-to/access-acm-ui" >}} + +## Customize a Developer Portal {#create-dev-portal} + +API Connectivity Manager uses a Dev Portal framework to define the look and feel of Developer Portals. These settings are applied at the Cluster level and apply to all Developer Portals hosted by the Cluster. + +Take the steps below to customize your Dev Portal by defining a custom Dev Portal framework. + +1. In the API Connectivity Manager user interface, go to **Environments > \**, where "your environment" is the Environment that contains a Developer Portal. +1. Select **Edit Portal Theme** from the **Actions** menu for the desired Developer Portal. You can then edit any of the options provided. + + - [Brand](#brand-options) + - [Style](#style-options) + - [Website](#website-options) + +1. You can save your changes at any time by selecting the **Save and Publish** option. + +{{}} +
    + +- The Preview section to the right of the settings in each section will update automatically as you make changes. +- The changes will be applied immediately when you select **Save and Publish**; as such we recommend testing any changes in a "non-production" Environment first. +{{
    }} + +## Brand Options + +### Add Custom Logo + +1. Select **Upload Image**. +1. Browse your local filesystem and select the file you'd like to upload. +1. Select **Save and Publish** or select **Next** to continue making changes. + +### Add a Favicon + +1. Select **Upload Favicon**. +1. Browse your local filesystem and select the file you'd like to upload. +1. Select **Save and Publish** or select **Next** to continue making changes. + +## Style Options + +### Colors + +You can customize the colors of the following items: + +- page background, +- page text, +- theme (buttons, etc.), and +- callouts (information, success, error, and warning). + +To customize any of the above fields: + +1. Select the field that you want to customize. +1. Enter the hex code for the desired color, or drag the sliders to create and refine a custom color. +1. Select **Save and Publish** or select **Next** to continue making changes. + +### Fonts + +You can customize the font used in any of the following: + +- hero text, +- header and body text, +- code. + +To customize the font used in your Dev Portal(s): + +1. Select the field that you want to customize. +1. Choose a font from the list provided. +1. Select **Save and Publish** or select **Next** to continue making changes. + +## Website Options + +You can customize the following sections of your Dev Portal website: + +- [Header](#header-options) +- [Footer](#footer-options) +- [Homepage](#homepage-options) +- [Documentation](#add-documentation) + +### Header {#header-options} + +You can customize the header's background color, text color, and provide text to appear next to your logo image. + +To customize the background or text color: + +1. Select the field that you want to customize. +1. Enter the hex code for the desired color, or drag the sliders to create and refine a custom color. + +To customize the header text, enter the text you want to use in the **Complementary logo text** field. 
+ +Then, select **Save and Publish** or select **Next** to continue making changes. + +### Footer {#footer-options} + +You can customize the footer's background color, text color, and provide links to appear in the footer. + +To customize the background or text color: + +1. Select the field that you want to customize. +1. Enter the hex code for the desired color, or drag the sliders to create and refine a custom color. + +To add links: + +1. Select **Add Links**. +1. Provide the display text and the target URL. + +To delete links: + +1. Select the "Delete" icon for any link that you want to remove. + +Then, select **Save and Publish** or select **Next** to continue making changes. + +### Homepage {#homepage-options} + +You can customize the following options for your Dev Portal homepage: + +- Hero image/banner text and color +- "About Us" cards: The homepage features three cards, which appear below the banner. +- Steps for getting started with your API. + +To edit the hero image/banner: + +1. Select the **Edit** icon. +1. Enter your desired text for the **Title** and **Secondary Title**. +1. Select the **Background** field and/or **Ink** field, then enter the hex code for the desired color or drag the sliders to create and refine a custom color. +1. Select **Save Changes**. + +To edit the "About Us" cards: + +1. Select the **Edit** icon. +1. Enter your desired **Title** (required), **Description** (required), icon, and **Alt Text** for each card. +1. Select **Save Changes**. + +To edit the **Get Started** steps: + +1. Select the **Edit** icon. +1. Enter your desired **Title** (required), **Description** (required), icon, and **Alt Text** for each of the four steps. +1. Select **Save Changes**. + +> {{< fa "fa-solid fa-lightbulb" >}} At this point, we recommend selecting **Save and Publish** to save any customizations you've made. +> Verify that the changes have been applied, then move on to adding your [**Documentation**](#add-documentation). 
+ +### Documentation {#add-documentation} + +#### Configure Documentation Page + +You can edit the **Documentation Page** section of your Dev Portal website to add custom documentation for your APIs. +You can add or edit up to five Markdown documents. The following placeholder pages are included by default: + +- Get Started +- Authentication +- Responses +- Errors + +To customize the **Documentation** page of your Developer Portal, take the steps below: + +1. Select the **Documentation** option in the **Edit Developer Portal** sidebar. +1. To add a new Markdown document, select **Add Page**. This adds a new blank item to the Pages table, which you can then edit and preview as described below. + + 1. Select the **Edit** icon in the Pages table. + 1. Edit the sample text, or paste your text into the editor. + 1. Select **Preview** to view the Markdown rendered as HTML. + 1. Provide a new **Page Name**, if desired. + 1. Select **Save** to save your changes. + +1. To reorder your documents, select the up or down arrow next to the Page name. + +When ready, select **Save and Publish** to save all of your changes and publish your documentation. + +#### Configure All APIs Page + +The **All APIs** page is where all of your APIs will appear on your Dev Portal site. +To customize the **Configure All APIs** page of your Developer Portal, take the steps below: + +1. Select the **Documentation** option in the **Edit Developer Portal** sidebar. +1. Select the **Edit** icon for the **Configure All APIs** section. +1. Add your **Page Description** (required). +1. Select **Upload illustration** to add a new full-width image to the page. Then, browse your local filesystem and select the file you'd like to upload. +1. To change the image background color, select the **Illustration Background** field. Then, then enter the hex code for the desired color or drag the sliders to create and refine a custom color. +1. Add the desired **Alt Text** for the image to the field provided. +1. 
Select **Save Changes**. + +When ready, select **Save and Publish** to save and publish your **All APIs** page changes. diff --git a/content/nms/acm/how-to/infrastructure/enable-create-credentials.md b/content/nms/acm/how-to/infrastructure/enable-create-credentials.md new file mode 100644 index 000000000..4b718eb6e --- /dev/null +++ b/content/nms/acm/how-to/infrastructure/enable-create-credentials.md @@ -0,0 +1,149 @@ +--- +description: Follow the steps in this guide to allow users to create credentials as + a self-service workflow on the Developer Portal. +docs: DOCS-947 +doctypes: +- task +tags: +- docs +title: Enable Creating Credentials on the Developer Portal +toc: true +weight: 400 +--- + +{{< raw-html >}} + +{{< /raw-html >}} +## Overview + +API Connectivity manager supports public API workflows. Public APIs are open for anyone to consume by requesting resource credentials. Resource credentials can be managed on the Developer Portal for public APIs secured with APIKey or Basic Authentication. Consumers have to log in to the Developer Portal to create credentials. Once created, credentials can be used to access APIs. Users can also use the credentials to test APIs on the Developer Portal with the **Try It Out** feature. + +### Before You Begin + +To complete the steps in this guide, you need to the following: + +- API Connectivity Manager is installed, licensed, and running. +- You have one or more environments with [API Gateway]({{< relref "/nms/acm/getting-started/add-api-gateway" >}}) or [Dev Portal]({{< relref "/nms/acm/getting-started/add-devportal" >}}) clusters. + +### How to Access the User Interface + +{{< include "acm/how-to/access-acm-ui" >}} + +--- + +## Self-Managed Credentials Workflow + +On the Developer Portal, logged-in users can create credentials for public APIs. Since this workflow is available only for logged-in users, the OIDC policy must be applied on the Developer Portal to enable SSO with an IDP of choice. 
In addition, the API proxies should be secured with either Basic Authentication or APIKey authentication policy. + +The process for setting up end-to-end credentials is as follows: + +- Enable the Credentials endpoint on the API Connectivity Manager host +- Enable SSO on the Developer Portal with an OIDC policy +- Publish the API Proxy and secure it by adding an APIKey or Basic Authentication policy + +Afterward, the API consumer can create credentials on the Developer Portal by performing the following: + +- API consumer logs in to developer portal, creates org, app, and credentials for the API. +- Test the API with the **Try It Out** option and the newly created credentials. + +### Enable Create Credentials Endpoint + +As mTLS is not enabled by default, the Credentials endpoint is disabled initially. You must enable the Credentials endpoint on the API Connectivity Manager host to use the Developer Portal credentials workflow. + +{{}}mTLS is essential to secure communication between API Connectivity Manager and the Developer Portal.{{}} + +To enable the Credentials endpoint on the API Connectivity Manager host, take the following steps: + +1. Make sure mTLS server and client certificates have been configured for Devportal to F5 NGINX Management Suite by following these [instructions]({{< relref "/nms/acm/how-to/devportals/installation/install-dev-portal.md#secure-communication-from-the-developer-portal-to-nginx-management-suite-host-with-mtls" >}}) to add your server certs, CA file and enforce mTLS. + +1. Open an SSH connection into the API Connectivity Manager host and log in. + +1. Enable the Credentials endpoint: + + Open `/etc/nms/nginx/locations/nms-acm.conf` for editing and uncomment the location block. + + ``` yaml + # Deployment of resource credentials from the devportal + # Uncomment this block when using devportal. Authentication is disabled + # for this location. 
This location block will mutually + # verify the client trying to access the credentials API. + location = /api/v1/devportal/credentials { + # OIDC authentication (uncomment to disable) + #auth_jwt off; + auth_basic off; + error_page 401 /401_certs.json; + if ($ssl_client_verify != SUCCESS) { + return 401; + } + proxy_pass http://acm-api-service/api/acm/v1/devportal/credentials; + } + ``` + +1. Save the changes. + +1. Reload NGINX on the API Connectivity Manager host: + + ```bash + sudo nginx -s reload + ``` + +### Enable SSO on the Developer Portal + +1. Follow the instructions to [enable single sign-on (SSO) for the Developer Portal]({{< relref "/nms/acm/how-to/infrastructure/enable-sso-devportal.md" >}}) with an OIDC policy. + +### Publish and Secure the API Proxy + +A link to **Edit Advanced Configurations** is displayed upon publishing the API Proxy. If you want to add policies, this is where to do that. + +To add an APIKey Authentication policy: + +1. Select **Policies** in the advanced section of the menu. +2. Select **Add Policy** for the APIKey Authentication policy, then complete the required information in the form. +3. (Optional) To quickly test the setup, you can create a test credential. Add a credential by selecting **Add APIKey** and specifying **Client ID** and **APIKey**. +4. Select **Add Policy**. +5. Select **Save and Publish**. + +#### Add a CORS Policy + +Depending on the domain, you might need to add a CORS policy to the API proxy in order to use the **Try It Out** feature on the Developer Portal. + +To add a CORS policy: + +1. Select the **Policies** menu item in the advanced section of the menu. +2. Select **Add Policy** for the CORS policy. then complete the required information in the form. +3. Add the header used in the APIKey policy above to the **OPTIONS** request. +4. Select **Add Policy**. +5. Select **Save and Publish**. + +### Create Credentials + +Log in to the Developer Portal as an API Consumer. 
Use the **Create Credentials** option to create credentials for the API. + +{{}} + + To avoid misuse, the API Consumer may create only one APIKey per API. + +{{}} + +### Try It Out on the Developer Portal + +Once the credentials have been created and are available, you can use the **Try It Out** feature on the Developer Portal to test the API using the newly created credentials. diff --git a/content/nms/acm/how-to/infrastructure/enable-sso-devportal.md b/content/nms/acm/how-to/infrastructure/enable-sso-devportal.md new file mode 100644 index 000000000..04dd67fc3 --- /dev/null +++ b/content/nms/acm/how-to/infrastructure/enable-sso-devportal.md @@ -0,0 +1,176 @@ +--- +description: Learn how to enable Single Sign-On for Developer Portal. +docs: DOCS-928 +doctypes: +- task +tags: +- docs +title: Enable Single Sign-On for Developer Portal +toc: true +weight: 400 +--- + +{{< raw-html >}} + +{{< /raw-html >}} + +{{< shortversions "1.1.0" "latest" "acmvers" >}} + +## Overview + +Single sign-on (SSO) can be enabled on the Developer Portal to secure access to the portal and to allow authenticated API consumers to manage resource credentials. Logged-in consumers can then self-manage resource credentials for the APIs. + +Single sign-on is enabled by applying an OpenID Connect (OIDC) policy on the Developer Portal. The OIDC policy sets up the portal proxy to act as a relying party to authenticate users with the OIDC provider. + +### Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- API Connectivity Manager is installed, licensed, and running. See [API Connectivity Manager Install Guide]({{< relref "/nim/deploy/_index.md" >}}). +- You have one or more Environments with [API Gateways]({{< relref "/nms/acm/getting-started/add-api-gateway" >}}) or [Developer Portals]({{< relref "/nms/acm/getting-started/add-devportal" >}}). 
+ +### Terminology + +The following terminology is used in this topic: + +{{}} + +| Term | Description | +|--------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| AuthCode | Authorization Code. | +| IDP | Identity Provider stores and verifies a user's identity as a service. | +| IDP Authorization Server | The IDP Authorization Server authenticates and issues access tokens to users. | +| OAuth | OAuth is an open-standard authorization protocol. | +| OIDC | OpenID Connect is an authentication protocol that adds an identity verification layer. | +| PKCE | Proof Key for Code Exchange. When public clients request Access Tokens, some additional security concerns are posed that are not mitigated by the Authorization Code Flow alone. PKCE needs the clients app to provide proof to the authorization server, to verify that the authorization code belongs to the clients' app. | +| URI | Uniform Resource Indicator. It is a unique character sequence which distinguishes one resource from another. | + +{{}} + +### Supported OIDC Identity Providers + +API Connectivity Manager supports all of the same identity providers as F5 NGINX Plus. The following guides describe how to configure NGINX Plus for these identity providers, and outline where to find the information you'll need to configure them for OIDC. 
+ +- [Auth0](/nginx/deployment-guides/single-sign-on/auth0/) +- [Amazon Cognito](/nginx/deployment-guides/single-sign-on/cognito) +- [Keycloak](/nginx/deployment-guides/single-sign-on/keycloak) +- [Microsoft Active Directory FS](/nginx/deployment-guides/single-sign-on/active-directory-federation-services) +- [Okta](/nginx/deployment-guides/single-sign-on/okta) +- [OneLogin](/nginx/deployment-guides/single-sign-on/onelogin) +- [Ping Identity](/nginx/deployment-guides/single-sign-on/ping-identity) + +## Set up OIDC Policy + +You can set up OIDC policy by using either the web interface or the REST API. + +### Updating OIDC Policy + +{{}} + {{%tab name="Web Interface"%}} + +1. In the API Connectivity Manager user interface, go to **Infrastructure > Workspaces > Environments** and select the **Edit Advanced Config** from the **Actions** menu for the cluster you want to set up. +2. Select the **Global Policies** tab. +3. For **OpenID Connect Relying Party** select **Add Policy** from the policy's **Actions** menu. +4. Update **Application Settings**. + +{{< include "acm/how-to/update-application-settings.md" >}} + +5. Update **Authorization Server Settings** + +{{< include "acm/how-to/update-authorization-server-settings.md" >}} + +6. Update **General Settings** + +{{< include "acm/how-to/update-general-settings.md" >}} + +7. Update **Custom Error Handling**. + + You can customize how the proxy should handle the following error conditions: + + - when Client ID is not supplied + - when there is no match for the Client ID + + Specify the HTTP error code in the box next to the error condition. The specified error code will be displayed when the related error condition is true. + +8. Select **Add**. +9. Select **Save and Submit** your changes. + + {{%/tab%}} + {{%tab name="REST API"%}} + +1. Send a POST request to add the OIDC policy to the cluster. 
+ + + {{}} + + | Method | Endpoint | + |-------------|----------| + | POST | `/api/v1/infrastructure/workspaces/{{proxyWorkspaceName}}/environments`| + + + + {{}} + + + ```json + { + "name": "test", + "type": "NON-PROD", + "functions": [ + "DEVPORTAL" + ], + "systemProperties": { + "acmHostName": "" + }, + "proxies": [...], + "policies": { + "oidc-authz": [ + { + "action": { + "config": { + "jwksURI": "https:///v1/keys", + "tokenEndpoint": "https:///v1/token", + "userInfoEndpoint": "https:///v1/userinfo", + "authorizationEndpoint": "https:///v1/authorize", + "logOffEndpoint": "https:///v1/logout", + "authFlowType": "PKCE" + } + }, + "data": [ + { + "appName": "Myapp", + "clientID": "", + "scopes": "apigw+openid+profile+email+offline_access" + } + ] + } + ] + } + } + ] + } + ``` + + {{%/tab%}} +{{}} + +Single sign-on (SSO) is enabled on the Developer Portal after configuring the OIDC policy. Application developers can log in through the configured centralized identity provider (IDP). After a successful login, they can create resource credentials for the available APIs. + +## Known Limitation with the policy + +The OIDC policy does not yet support custom DNS for resolution. Only external DNS resolution is supported. diff --git a/content/nms/acm/how-to/infrastructure/manage-api-infrastructure.md b/content/nms/acm/how-to/infrastructure/manage-api-infrastructure.md new file mode 100644 index 000000000..113231fab --- /dev/null +++ b/content/nms/acm/how-to/infrastructure/manage-api-infrastructure.md @@ -0,0 +1,196 @@ +--- +description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to manage + your API infrastructure. +docs: DOCS-924 +doctypes: +- task +tags: +- docs +title: Manage API Infrastructure +toc: true +weight: 100 +--- + +{{< shortversions "1.1.0" "latest" "acmvers" >}} + +## Overview + +API Connectivity Manager lets you manage your API infrastructure by using a set of hierarchical resources. 
The top-level resource, called a **Workspace**, provides a logical grouping for resources called **Environments**. Environments contain **Clusters** that allocate NGINX instances for use as API Gateways and Developer Portals. + +You can use Workspaces to create isolated work areas for business units or teams. You can use Environments to allocate infrastructure resources for use within a team's Workspace. + +This guide provides instructions for using API Connectivity Manager Workspaces and Environments to manage your API infrastructure. + +## Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- API Connectivity Manager is installed, running, and licensed. +- You have SSH access to the host where API Connectivity Manager is running and can use the `sudo` command. +- You have installed a [supported version]({{< ref "tech-specs" >}}) of F5 NGINX Plus on each host that you want to add to a Cluster. +- You know the IP address or FQDN for each host that you want to add to a cluster. +- You have SSH access to each of the hosts that you want to allocate to a cluster and can use the `sudo` command. +- You have installed the [`njs`](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/nginscript/) module on each host that you want to add to the cluster. + +## How to Access the User Interface + +{{< include "acm/how-to/access-acm-ui.md" >}} + +## Create a Workspace {#create-workspace} + +{{}} + +{{%tab name="UI"%}} + +Take the steps below to create a new Workspace. + +1. From the API Connectivity Manager **Infrastructure** landing page, select **Create Workspace**. +1. In the **Create Workspace** drawer, provide a **Name** and **Description**. + + - **Name**: (required) A name can be any combination of lowercase letters, hyphens, numbers, or underscores. Spaces and capital letters are not allowed. 
+ - **Description**: (optional; 150-character limit) The description should help others in your organization understand the nature or purpose of the Workspace. + +1. (Optional) Select the **Contact Information** box to designate someone as the Workspace's owner. Then, provide the following information: + + - **Contact Name** + - **Contact Email** + - **Slack**: The contact's Slack handle + +1. Select **Create** to save your changes. + +The **Create Workspace** drawer will display a confirmation when the Workspace has been created. From there, you can go on to [Add an Environment](#Add-an-environment) or go back to the Workspaces landing page. + +{{%/tab%}} +{{%tab name="API"%}} + + + +{{}} + +| Method | Endpoint | +|-------------|----------| +| POST| `/infrastructure/workspaces`| + +{{}} + + +```json +{ + "name": "{{infraWorkspaceName}}", + "metadata": { + "description": "Petstore Team Workspace" + }, + "contactDetails": { + "adminEmail": "admin@example.com", + "adminName": "I.M. Admin", + "adminPhone": "555 123 1234" + } +} +``` + +{{%/tab%}} +{{}} +## Add an Environment {#add-environment} +{{}} + +{{%tab name="UI"%}} + +After creating a Workspace, you must create at least one Environment. When creating an Environment, you will also create the Clusters where your API Gateway(s) and/or Developer Portal(s) will reside. + +{{}} + +- Do not add the same host to both an API Gateway cluster and a Developer Portal cluster. +- The Developer Portal cluster requires at least one dedicated host. +{{}} + +Take the steps below to add an Environment. + +1. On the **Workspaces** landing page, select the ellipsis (`...`) icon for your desired Workspace. +1. Select **Add Environment**. +1. In the **Add Environment** drawer, provide the requested information: + - **Name** (required) + - **Description** (optional) + - **Type**: **Production** (**prod**) or **Non-Production** (**non-prod**) +1. 
In the **API Gateways** section, provide the **Name** and **Hostname** of at least one instance that you want to add to the cluster. + + This instance, or instance group, will host the API Gateway. +1. (Optional) In the **Developer Portals** section, provide the **Name** and **Hostname** of at least one instance that you want to add to the cluster. + + This instance, or instance group, will host the Developer Portal. + + {{}}The Dev Portal requires a separate, dedicated host. Do not install the Dev Portal on a host that is already running the management or data planes.{{}} +1. Select the **Create** button to create the Environment. The **Add Environment** drawer will display a confirmation when the Environment has been created. +1. Copy the `cURL` or `wget` command shown in the confirmation drawer and save it -- you will need to use this information to [add your NGINX instances to the cluster](#register-nginx-instance). + +{{%/tab%}} +{{%tab name="API"%}} +{{}} + +| Parameter | Description | +|:-----------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `proxies.proxyClusterName` | The group of NGINX instances where configuration will be written | +| `proxies.hostnames` | An IP Address or fully qualified domain name (FQDN) used to identify the API-Gateway environment| + +{{}} + + + +{{}} + +| Method | Endpoint | +|-------------|----------| +| POST| `/infrastructure/workspaces/{{infraWorkspaceName}}/environments`| + +{{}} + + +```json +{ + "name": "{{environmentname}}", + "proxies": [ + { + "proxyClusterName": "{{proxyClusterName}}", + "hostnames": [ + "{{environmentHostname}}" + ] + } + ] +} +``` + +{{%/tab%}} +{{}} +## Onboard an NGINX Instance {#register-nginx-instance} + +[Install the NGINX Agent]({{< relref "/nms/nginx-agent/install-nginx-agent" >}}) on each host to register the instance with API Connectivity 
Manager as part of the cluster. + +Take the steps below to add an NGINX instance to an API Gateway. + +1. Use SSH to log in to the host machine. +1. Run the `cURL` or `wget` install command that was displayed in the **Environment Created** confirmation drawer. +1. When the installation is complete, the instance will appear in the **Instances** list for the cluster in the API Connectivity Manager user interface. +1. After running the `cURL` command you can check the environment job status on the environments page +{{Environment Onboarding Status.}} + + +## Environment Statuses +{{}} + + +| Status | Description | +|:-----------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `Configuring` | ACM have received the changes and will attempt to deploy to the instance group | +| `Pending` | Check that instance group contains instances, see Instance Groups Overview page | +| `Fail` | Deploying configurations have failed, review the Events page for more details | +| `Success` | Changes have been successfully deployed to the instance group | + +{{}} + +> {{< fa "fa-solid fa-circle-question" >}} **Lost your install command?** +> +> Don't worry! You can take the steps below to recover it: +> +> 1. In the API Connectivity Manager user interface, go to **Infrastructure > Environments > \**. +> 1. Click anywhere in the row of the Cluster that you want to add an instance to. +> 1. The **Onboarding Commands** will be shown in the cluster details drawer. 
+ diff --git a/content/nms/acm/how-to/infrastructure/publish-developer-portal.md b/content/nms/acm/how-to/infrastructure/publish-developer-portal.md new file mode 100644 index 000000000..4b55016cb --- /dev/null +++ b/content/nms/acm/how-to/infrastructure/publish-developer-portal.md @@ -0,0 +1,90 @@ +--- +description: Learn how to use the F5 NGINX Management Suite API Connectivity Manager + web interface to create, update, or delete a Developer Portal. +docs: DOCS-901 +doctypes: +- task +tags: +- docs +title: Publish a Developer Portal +toc: true +weight: 300 +--- + +{{< shortversions "1.1.0" "latest" "acmvers" >}} + +## Overview + +You can use API Connectivity Manager to create and publish Developer Portals (or, "Dev Portals") to host your APIs and documentation. API Connectivity Manager Dev Portals provide a [framework for customization]({{< relref "/nms/acm/how-to/infrastructure/customize-devportal.md" >}}) that lets you match your Dev Portal to your brand's or business' requirements. + +You can also modify and delete your Developer Portals using API Connectivity Manager. + +### Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- API Connectivity Manager is installed, licensed, and running. +- Your [Infrastructure]({{< relref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md" >}}) has one or more Environments with a [Developer Portal]({{< relref "/nms/acm/getting-started/add-devportal" >}}) cluster. +- You have verified that you can access the Developer Portal using the configured hostname. + + +### How to Access the User Interface + +{{< include "acm/how-to/access-acm-ui" >}} + +## Create a Developer Portal + +### Create the Services Workspace + +1. Under **Modules**, select **API Connectivity Manager**. +1. On the sidebar, select **Services**. +1. On the **Services - Workspaces** section, select **Create Workspace**. +1. In the **Create Workspace** drawer, provide a **Name** and **Description**. 
+ - **Name**: (required) A name can be any combination of lowercase letters, hyphens, numbers, or underscores. Spaces and capital letters are not allowed. + - **Description**: (optional; 150-character limit) The description should help others in your organization understand the nature or purpose of the Workspace. +1. (Optional) Select the **Contact Information** box to designate someone as the Workspace's owner. Then, provide the following information: + + - **Contact Name** + - **Contact Email** + - **Slack**: The contact's Slack handle + +1. Select **Create** to save your changes. + + +## Modify Developer Portal Resources + +### Edit Workspace Description and Contact Information + +1. On the sidebar, select **Services**. +1. Select the ellipsis button next to your workspace on the **Actions** column. +1. Select **Edit Workspace**. +1. Update the **Description** and **Workspace Contact Information** as needed. +1. Select **Save**. + +## Delete Developer Portal Resources + +### Remove a Developer Portal from an API Proxy + +1. On the sidebar, select **Services**. +1. Select your workspace from the list. +1. On the **API Proxies** section, select the ellipsis button next to your API Proxy in the **Actions** column. +1. Select **Edit Proxy**. +1. On the **Basic > Configuration** section, uncheck **Also publish API to developer portal**. +1. Select **Save & Publish**. + +### Delete API Docs + +1. On the sidebar, select **Services**. +1. Select your workspace from the list. +1. On the **API Docs** section, select the ellipsis button next to your API Doc in the **Actions** column. +1. Select **Delete API Doc**. +1. Select **Delete** to confirm the action. + +### Delete Services Workspaces + +{{}}To delete a Workspace, you must delete all the API Proxies and API Docs belonging to a Services Workspace.{{}} + +1. On the sidebar, select **Services**. +1. Select the ellipsis button next to your workspace in the **Actions** column. +1. Select **Delete workspace**. +1. 
Select **Delete** to confirm the action. diff --git a/content/nms/acm/how-to/install-acm-offline.md b/content/nms/acm/how-to/install-acm-offline.md new file mode 100644 index 000000000..d164d3585 --- /dev/null +++ b/content/nms/acm/how-to/install-acm-offline.md @@ -0,0 +1,204 @@ +--- +title: "Offline Installation Guide" +docs: "DOCS-1669" +--- + +## Install or Upgrade API Connectivity Manager {#install-or-upgrade-acm-offline} + +{{< eol-call-out "warning" "End of Sale Notice:" >}} +F5 NGINX is announcing the **End of Sale (EoS)** for NGINX Instance Manager API Connectivity Manager Module, **effective January 1, 2024**. + +F5 maintains generous lifecycle policies that allow customers to continue support and receive product updates. Existing API Connectivity Manager Module customers can continue to use the product past the EoS date. **License renewals are not available after September 30, 2024**. + +See our [End of Sale announcement](https://my.f5.com/manage/s/article/K000137989) for more details. +{{< /eol-call-out >}} + +### Dependencies with Instance Manager {#acm-nim-dependencies} + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +### Install API Connectivity Manager {#install-acm} + +{{< important >}} +API Connectivity Manager requires Instance Manager to be installed first. + +Before you begin: + +1. Review the [Dependencies with Instance Manager](#acm-nim-dependencies) table above. +2. [Install a compatible version of Instance Manager](#install-nim-offline). +{{< /important>}} + +  + +{{}} +{{%tab name="CentOS, RHEL, and RPM-Based"%}} + +To install API Connectivity Manager, take the following steps: + +1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the API Connectivity Manager package files. + +2. 
Install the API Connectivity Manager package: + + ```bash + sudo rpm -ivh --nosignature /home//nms-api-connectivity-manager_.x86_64.rpm + ``` + +{{%/tab%}} +{{%tab name="Debian, Ubuntu, and Deb-Based"%}} + +To install API Connectivity Manager, take the following steps: + +1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the API Connectivity Manager package files. + +2. Install the API Connectivity Manager package: + + ```bash + sudo apt-get install -f /home//nms-api-connectivity-manager__amd64.deb + ``` + + +{{%/tab%}} +{{}} + +3. Enable and start the API Connectivity Manager service: + + ```bash + sudo systemctl enable nms-acm --now + ``` + + F5 NGINX Management Suite components started this way run by default as the non-root `nms` user inside the `nms` group, both of which are created during installation. + +4. Restart the NGINX web server: + + ```bash + sudo systemctl restart nginx + ``` + +### Post-Installation Steps {#acm-post-install-steps} + +{{< include "installation/optional-installation-steps.md" >}} + +See these topics below for instructions on how to access the web interface and add your license: + +- [Access the web interface](#access-web-ui) +- [Add a license](#add-license) + +### Upgrade API Connectivity Manager {#upgrade-acm-offline} + +{{}} +{{%tab name="CentOS, RHEL, and RPM-Based"%}} + +To upgrade API Connectivity Manager to a newer version, take the following steps: + +1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the API Connectivity Manager package file. + +2. Upgrade the API Connectivity Manager package: + + ```bash + sudo rpm -Uvh --nosignature /home/user/nms-api-connectivity-manager_.x86_64.rpm + ``` + +{{%/tab%}} +{{%tab name="Debian, Ubuntu, and Deb-Based"%}} + +To upgrade API Connectivity Manager to a newer version, take the following steps: + +1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the API Connectivity Manager package file. 
+ +2. Upgrade the API Connectivity Manager package: + + ```bash + sudo apt-get -y install -f /home/user/nms-api-connectivity-manager__amd64.deb + ``` + +{{%/tab%}} +{{}} + + +3. Restart the NGINX Management Suite platform services: + + ```bash + sudo systemctl restart nms + ``` + + NGINX Management Suite components started this way run by default as the non-root `nms` user inside the `nms` group, both of which are created during installation. + +4. Restart the API Connectivity Manager service: + + ```bash + sudo systemctl restart nms-acm + ``` + +5. Restart the NGINX web server: + + ```bash + sudo systemctl restart nginx + ``` + +6. (Optional) If you use SELinux, follow the steps in the [Configure SELinux]({{< relref "/nim/system-configuration/configure-selinux.md" >}}) guide to restore SELinux contexts (`restorecon`) for the files and directories related to NGINX Management Suite. + + +### Set Up the Data Plane {#acm-offline-dependencies} + +The API Connectivity Manager data plane requires [NGINX Plus R24](https://docs.nginx.com/nginx/) or later and [njs](https://nginx.org/en/docs/njs/).. + +1. Log in to MyF5 and download your `nginx-repo.crt` and `nginx-repo.key` files. +2. Copy the `nginx-repo.crt` and `nginx-repo.key` files to the `/etc/ssl/nginx/` directory: + + ```bash + sudo cp nginx-repo.crt /etc/ssl/nginx/ + sudo cp nginx-repo.key /etc/ssl/nginx/ + ``` + +3. Select the following link to download the `fetch-external-acm-dataplane-dependencies.sh` script. This script downloads the necessary NGINX Plus and njs packages to a `tar.gz` archive. + + {{}} {{}} + +4. To download the NGINX Plus and njs dependencies, run the `fetch-external-acm-dataplane-dependencies.sh` script. As parameters, specify your Linux distribution and the location of your `nginx-repo.crt` and `nginx-repo.key` files. 
+ + ```bash + sudo bash fetch-external-acm-dataplane-dependencies.sh /etc/ssl/nginx/nginx-repo.crt /etc/ssl/nginx/nginx-repo.key + ``` + + Supported Linux distributions: + + - `ubuntu18.04` + - `ubuntu20.04` + - `debian10` + - `debian11` + - `centos7` + - `centos8` + - `rhel7` + - `rhel8` + - `amzn2` + + For example, to download external dependencies for Ubuntu 20.04: + + ```bash + sudo bash fetch-external-acm-dataplane-dependencies.sh ubuntu20.04 /etc/ssl/nginx/nginx-repo.crt /etc/ssl/nginx/nginx-repo.key + ``` + + In this example, the script creates an archive called `acm-dataplane-dependencies-ubuntu20.04.tar.gz` with the external dependencies. + +5. After you copy and extract the bundle onto your target machine, take the following steps to install the packages: + + {{< note >}}The bundled NGINX Plus package may conflict with installed versions of NGINX Plus. Delete the package from the bundle if you want to keep the existing version.{{< /note >}} + + {{}} + {{%tab name="CentOS, RHEL, and RPM-Based"%}} + +```bash +tar -kzxvf acm-dataplane-dependencies-.tar.gz +sudo rpm -ivh *.rpm +``` + + {{%/tab%}} + {{%tab name="Debian, Ubuntu, and Deb-Based"%}} + +```bash +tar -kzxvf acm-dataplane-dependencies-.tar.gz +sudo dpkg -i ./*.deb +``` + +{{%/tab%}} +{{}} diff --git a/content/nms/acm/how-to/install-acm.md b/content/nms/acm/how-to/install-acm.md new file mode 100644 index 000000000..c52dd941b --- /dev/null +++ b/content/nms/acm/how-to/install-acm.md @@ -0,0 +1,164 @@ +--- +description: Follow the steps in this guide to install or upgrade F5 NGINX Management + Suite API Connectivity Manager. 
+docs: DOCS-1213 +doctypes: +- tutorial +layout: acm-eos +tags: +- docs +title: Install or Upgrade API Connectivity Manager +toc: true +weight: 10 +--- + +--- + +## Before You Begin + +### Security Considerations + +{{< include "installation/secure-installation.md" >}} + +### Installation Prerequisites + +{{< include "installation/nms-prerequisites.md" >}} + +### Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +--- + +## Install API Connectivity Manager + +{{}} + +{{%tab name="CentOS, RHEL, RPM-Based"%}} + +1. To install the latest version of API Connectivity Manager, run the following command: + + ```bash + sudo yum install -y nms-api-connectivity-manager + ``` + +{{%/tab%}} + +{{%tab name="Debian, Ubuntu, Deb-Based"%}} + +1. To install the latest version of API Connectivity Manager, run the following commands: + + ```bash + sudo apt-get update + sudo apt-get install nms-api-connectivity-manager + ``` + +{{%/tab%}} + +{{}} + +2. Enable and start the F5 NGINX Management Suite services: + + ```bash + sudo systemctl enable nms nms-core nms-dpm nms-ingestion nms-integrations nms-acm --now + ``` + + NGINX Management Suite components started this way run by default as the non-root `nms` user inside the `nms` group, both of which are created during installation. + +3. Restart the NGINX web server: + + ```bash + sudo systemctl restart nginx + ``` + +### Post-Installation Steps + +{{< include "installation/optional-installation-steps.md" >}} + +### Accessing the Web Interface + +{{< include "installation/access-web-ui.md" >}} + +### Add License + +A valid license is required to make full use of all the features in API Connectivity Manager. + +Refer to the [Add a License]({{< relref "/nim/admin-guide/license/add-license.md" >}}) topic for instructions on how to download and apply a trial license, subscription license, or Flexible Consumption Program license. 
+ +--- + +## Upgrade API Connectivity Manager {#upgrade-acm} + +{{}}When you confirm the upgrade, the upgrade process will automatically upgrade dependent packages as needed, including Instance Manager. If you prefer to [back up NGINX Management Suite]({{< relref "/nim/admin-guide/maintenance/backup-and-recovery.md" >}}) before upgrading, you can cancel the upgrade when prompted.{{}} + +
    + +{{}} +{{%tab name="CentOS, RHEL, RPM-Based"%}} + +1. To upgrade to the latest version of API Connectivity Manager, run the following command: + + ```bash + sudo yum update -y nms-api-connectivity-manager + ``` + +{{%/tab%}} + +{{%tab name="Debian, Ubuntu, Deb-Based"%}} + +1. To upgrade to the latest version of the API Connectivity Manager, run the following command: + + ```bash + sudo apt-get update + sudo apt-get install -y --only-upgrade nms-api-connectivity-manager + ``` + +{{%/tab%}} +{{}} + +2. Restart the NGINX Management Suite platform services: + + ```bash + sudo systemctl restart nms + ``` + + NGINX Management Suite components started this way run by default as the non-root `nms` user inside the `nms` group, both of which are created during installation. + +3. Restart the API Connectivity Manager service: + + ```bash + sudo systemctl restart nms-acm + ``` + +4. Restart the NGINX web server: + + ```bash + sudo systemctl restart nginx + ``` + +5. (Optional) If you use SELinux, follow the steps in the [Configure SELinux]({{< relref "/nim/system-configuration/configure-selinux.md" >}}) guide to restore the default SELinux labels (`restorecon`) for the files and directories related to NGINX Management Suite. + +--- +## What's Next + +### Set Up the Data Plane + +API Connectivity Manager requires one or more data plane hosts for the API Gateway. + +Complete the following steps for each data plane instance you want to use with API Connectivity Manager: + +1. [Install NGINX Plus R24 or later](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-plus/) +2. [Install NGINX njs module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/nginscript/) +3. [Install the NGINX Agent]({{< relref "/nms/nginx-agent/install-nginx-agent.md" >}}) on your data plane instances to register them with NGINX Management Suite. 
+ +### Install the Developer Portal + +- [Install the Developer Portal]({{< relref "/nms/acm/how-to/devportals/installation/install-dev-portal.md" >}}) + +### Install Other NGINX Management Suite Modules + +- [Install Security Monitoring]({{< relref "/nim/monitoring/security-monitoring/deploy/install-security-monitoring.md" >}}) + +### Get Started with API Connectivity Manager + +- [Create Workspaces and Environments for your API Infrastructure]({{< relref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md" >}}) diff --git a/content/nms/acm/how-to/policies/_index.md b/content/nms/acm/how-to/policies/_index.md new file mode 100644 index 000000000..d68daee96 --- /dev/null +++ b/content/nms/acm/how-to/policies/_index.md @@ -0,0 +1,5 @@ +--- +title: Policies +weight: 1000 +url: /nginx-management-suite/acm/how-to/policies/ +--- \ No newline at end of file diff --git a/content/nms/acm/how-to/policies/access-control-routing.md b/content/nms/acm/how-to/policies/access-control-routing.md new file mode 100644 index 000000000..59df2aeb0 --- /dev/null +++ b/content/nms/acm/how-to/policies/access-control-routing.md @@ -0,0 +1,111 @@ +--- +description: Learn how to restrict access to your application servers based on JWT + claims or header values. +docs: DOCS-1265 +doctypes: +- task +tags: +- docs +title: Access Control Routing +toc: true +weight: 300 +--- + +{{< shortversions "1.3.0" "latest" "acmvers" >}} + +## Overview + +{{< include "acm/how-to/policies-intro" >}} + +## Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- API Connectivity Manager is installed, licensed, and running. +- You have one or more Environments with [API Gateway]({{< relref "/nms/acm/getting-started/add-api-gateway" >}}) or [Dev Portal]({{< relref "/nms/acm/getting-started/add-devportal" >}}) clusters. 
+- You have published one or more [API Gateways or Developer Portals]({{< relref "/nms/acm/getting-started/publish-api-proxy" >}}) with either JSON Web Token Assertion or OAuth2 Introspection enabled. + +### How to Access the User Interface + +{{< include "acm/how-to/access-acm-ui" >}} + +### How to Access the REST API + +{{< include "acm/how-to/access-acm-api" >}} + +## Create Access Control Routing Policy + +Take the steps in this section if you would like to restrict access to Advanced Routes or HTTP methods based on either request headers or JWT tokens. + +{{}} + {{%tab name="UI"%}} + +1. In the API Connectivity Manager user interface, go to **Services > \**, where "your workspace" is the workspace that contains the API Proxy. +1. Select **Edit Proxy** from the Actions menu for the desired API Proxy. +1. On the **Policies** tab, select **Add Policy** from the **Actions** menu. +1. Select **Add route** to configure a rule. Select one or more keys and approved values which will be checked before allowing the end user access to the API. Optionally select an Advanced Route or list of HTTP methods which will restrict the Access Control check to requests which match that configuration. +1. Optionally set the return code, which should be returned to requests which do not satisfy the condition specified. + + + {{%/tab%}} + {{%tab name="API"%}} + +```json +"policies": { + "access-control-routing": [ + { + "action": { + "conditions": [ + { + "allowAccess": { + "httpMethods": ["GET"] + }, + "when": [ + { + "key": "token.role", + "matchType": "STRING", + "matchOneOf": { + "values": [ + "admin" + ] + } + }, + { + "key": "token.sub", + "matchType": "REGEX", + "matchOneOf": { + "values": [ + "^.*test.com" + ] + } + } + ] + } + ] + } + } + ] +``` + + {{%/tab%}} +{{}} + +{{< note >}} + +- Any requests which do not match a specified condition will be allowed to access the API Gateway or Developer Portal. 
Adding a rule with no route or HTTP method specified means that the rule applies to all routes and HTTP methods. +- Adding multiple match conditions in a rule requires that all conditions are matched in order to access the API. +- Adding the same configuration of route and HTTP method to multiple rules will be treated as an OR condition. +- Any requests which match multiple rules will be checked from most to least specific. +- If `matchType` is not specified, `STRING` will be used. +- If the token claim is an array value, `STRING` and `REGEX` behave differently. + - `STRING` will match if any of the values contained in the array match one of the values. + - `REGEX` will check against the array converted to a comma-separated string. For example, `[ "first", "second", "third" ]` will become `first,second,third` when the regular expression is checked against it. + +{{< /note >}} + +## Verification + +1. Attempt to contact the API Gateway or Developer Portal from a client. +1. Contact the IP address from an allowed IP address. The traffic should not be denied. + + diff --git a/content/nms/acm/how-to/policies/advanced-security.md b/content/nms/acm/how-to/policies/advanced-security.md new file mode 100644 index 000000000..1dfbcdbd6 --- /dev/null +++ b/content/nms/acm/how-to/policies/advanced-security.md @@ -0,0 +1,208 @@ +--- +description: Learn how to add an F5 NGINX App Protect WAF policy to your environment + by using the Advanced Security policy in NGINX Management Suite API Connectivity + Manager. +docs: DOCS-1264 +doctypes: +- API Connectivity Manager +- api management +- concept +tags: +- docs +title: Advanced Security +toc: true +weight: 350 +--- + +## Overview + +{{< include "acm/how-to/policies-proxy-intro" >}} + +--- + +## About Advanced Security Policy + +Use the *Advanced Security* policy to add a pre-defined F5 NGINX App Protect WAF policy to your deployment. 
+This will allow enforcement of rules to *Block* or *Monitor* security events triggering those violations set out in the policy. + +### Intended Audience + +{{< include "acm/how-to/policies/infra-admin-persona.md">}} +{{< include "acm/how-to/policies/api-owner-persona.md">}} + +--- + +## Before You Begin + +To complete the steps in this guide, you need the following: + +- API Connectivity Manager is installed, licensed, and running. +- You have one or more [Environments with an API Gateway]({{< relref "/nms/acm/getting-started/add-api-gateway" >}}). +- You have [published one or more API Gateways]({{< relref "/nms/acm/getting-started/publish-api-proxy" >}}). +- You have [installed and set up NGINX App Protect]({{< relref "/nap-waf/v4/admin-guide/install-nms" >}}). +- NGINX Management Suite Security Monitoring is [installed]({{< relref "/nim/monitoring/security-monitoring/deploy/install-security-monitoring.md" >}}) and running. + +--- + +## Policy Settings + +The applied policy is configurable, and all events created by rule violations will go to the `Security Monitoring` dashboard in NGINX Management Suite. + +To create a new policy or modify an existing policy, you can navigate to the *App Protect* area of the NGINX Management Suite. + +*NGINX App Protect* policies can also contain a reference to an Open API Specification which will enable payload schema validation on the dataplane instance. + +{{< note >}} + +For information on how to configure an *App Protect* policy, please visit - [Configure NGINX App Protect WAF](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#policy-configuration-overview) + +To create an NGINX App Protect WAF policy to use in your Advanced Security policy, please see the [Create a Policy]({{< relref "/nim/nginx-app-protect/manage-waf-security-policies#create-security-policy" >}}) documentation. 
+{{< /note >}} + +--- + +## Applying the Policy + +*NGINX App Protect* policies can be applied to both *Environments* and *Proxies*, allowing for granular control. + +Should you wish to configure a global monitoring policy (non-blocking), but require blocking on only a subset of your API endpoints, you can apply a monitoring policy to your environment and a blocking policy on the proxy you have deployed to that environment. + +This means that only the specific *Proxy* that you have applied the policy to will be enforced in blocking mode and the other endpoints in that environment are unaffected, inherting the monitoring policy from their parent *Environment*. + +*Proxies* in an *Environment* can also each have their own different policies applied should that be required. + +There are two methods available to enable adding an *Advanced Security* policy to your Deployment: + +
    +Environment +{{}} + +{{%tab name="API"%}} + +{{}}{{< include "acm/how-to/access-acm-api.md" >}}{{}} + +To create an *Advanced Security* policy using the REST API, send an HTTP `POST` or `PUT` request to the *Environments* endpoint. + + +{{}} + +| Method | Endpoint | +|--------|----------------------------------------------------| +| `POST` | `/infrastructure/workspaces/{infra-workspace}/environments` | +| `PUT` | `/infrastructure/workspaces/{infra-workspace}/environments/{environment-name}` | + +{{}} + + +
    +JSON request + +```json +{ + "policies": { + "advanced-security": [ + { + "action": { + "policyRef": "<policy-name>" + } + } + ] + } +} +``` + +
    + +{{%/tab%}} + +{{%tab name="UI"%}} + +To create an *Advanced Security* policy using the web interface: + +{{< include "acm/webui-acm-login.md" >}} + +1. On the left menu, select **Infrastructure**. +2. Select a workspace in the list that contains the Environment you want to update. +3. On the workspace overview page, on the **Environments** tab, locate the Environment you want to update and select it. +4. On the Environment Overview page, locate the **API Gateway** you want to update and select it. +5. On the **API Gateway** overview page, find and select the **Manage** button and select it. +6. On the *Advanced > Global Policies* page, locate **Advanced Security Policy**. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Add Policy**. +7. On the *Advanced Security Policy* form, complete the necessary fields: +8. + - **Choose a NAP Policy Reference**: Specify the name of the policy you want to apply from the dropdown + +9. Select **Add**/**Save** to apply the policy to the Environment. +10. Select **Save and Submit** to deploy the configuration to the Environment. + +{{%/tab%}} + +{{
    }} +
    + +
    +Proxy + +{{}} + +{{%tab name="API"%}} + +{{}}{{< include "acm/how-to/access-acm-api.md" >}}{{}} + +To create an *Advanced Security* policy using the REST API, send an HTTP `POST` or `PUT` request to the Proxies endpoint. + + +{{}} + +| Method | Endpoint | +|--------|----------------------------------------------------| +| `POST` | `/services/workspaces/{service-workspace}/proxies` | +| `PUT` | `/services/workspaces/{service-workspace}/proxies/{proxy-name}` | + +{{}} + + +
    +JSON request + +```json +{ + "policies": { + "api-advanced-security": [ + { + "action": { + "policyRef": "<policy-name>", + "appProtectMode": "<app-protect-mode>" + } + } + ] + } +} +``` + +
    + +{{%/tab%}} + +{{%tab name="UI"%}} + +To create an *Advanced Security* policy using the web interface: + +{{< include "acm/webui-acm-login.md" >}} + +1. On the left menu, select **Services**. +2. Select a workspace in the list that contains the *Proxy* you want to update. +3. On the workspace overview page, on the **API Proxies** tab, locate the *Proxy* you want to update and Select the **Actions** menu (represented by an ellipsis, `...`) and select **Edit proxy** +4. On the *Policies* page, locate **Advanced Security**. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Add Policy**. +5. On the *Advanced Security Policy* form, complete the necessary fields: + + - **Choose your App Protect mode**: This allows the enforcement or non-enforcement on a particular group of API endpoints, you may want to disable *App Protect* for some endpoints but not others. + - **Choose a NAP Policy Reference**: Specify the name of the policy you want to apply from the dropdown. + +6. Select **Add**/**Save** to apply the policy to the *Proxy*. +7. Select **Save and Submit** to deploy the configuration to the *Proxy*. + +{{%/tab%}} + +{{
    }} + +
    diff --git a/content/nms/acm/how-to/policies/allowed-http-methods.md b/content/nms/acm/how-to/policies/allowed-http-methods.md new file mode 100644 index 000000000..8df5b4a63 --- /dev/null +++ b/content/nms/acm/how-to/policies/allowed-http-methods.md @@ -0,0 +1,136 @@ +--- +description: Learn how to block unwelcome requests to an endpoint by using the Allowed + HTTP Methods policy in F5 NGINX Management Suite API Connectivity Manager. +docs: DOCS-1121 +doctypes: +- API Connectivity Manager +- api management +- concept +tags: +- docs +title: Allowed HTTP Methods +toc: true +weight: 350 +--- + +## Overview + +{{< include "acm/how-to/policies-proxy-intro" >}} + +--- + +## About Allow HTTP Methods Policy + +Use the *Allowed HTTP Methods* policy to specify which methods you want to allow, while automatically blocking all the others. As an example, you could allow only `GET` requests for static content. + +### Intended Audience + +{{< include "acm/how-to/policies/infra-admin-persona.md">}} + +--- + +## Before You Begin + +To complete the steps in this guide, you need the following: + +- API Connectivity Manager is installed, licensed, and running. +- You have one or more [Environments with an API Gateway]({{< relref "/nms/acm/getting-started/add-api-gateway" >}}). +- You have [published one or more API Gateways]({{< relref "/nms/acm/getting-started/publish-api-proxy" >}}). 
+ +--- + +## Policy Settings + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values | Description | Required | Default value | +|------------------|-------|------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-----------------------| +| `allowedMethods` | array | `GET`, `PUT`, `POST`, `PATCH`, `DELETE`, `CONNECT`, `OPTIONS`, `TRACE` |

    This array contains all of the possible HTTP methods.<br><br>Methods listed in `allowedMethods` will be accepted; any omitted methods will be blocked with a return code of `405 Method Not Allowed` (default), or a code of your choice.<br><br>Note: `HEAD` requests are treated the same as `GET` requests.

    | Yes | | +| `returnCode` | int | In range `400-599` | The status code to be returned if a method isn't included in the `allowedMethods` array. | No | System assigned `405` | + +{{< /bootstrap-table >}} + + +--- + +## Applying the Policy + +Follow these steps to restrict which HTTP methods clients can use to access your API. If the request's HTTP method is not in the allowed methods list, a `405 Method Not Allowed` response is returned by default, or you can specify a different error code. + +{{< note >}} By enabling the `GET` method, the `HEAD` method is also enabled. {{< /note >}} + +
    + +{{}} + +{{%tab name="API"%}} + +{{}}{{< include "acm/how-to/access-acm-api.md" >}}{{}} + +To create an *Allowed HTTP Methods* policy using the REST API, send an HTTP `POST` request to the Proxies endpoint. + + +{{}} + +| Method | Endpoint | +|--------|----------------------------------------------------| +| `POST` | `/services/workspaces/{service-workspace}/proxies` | + +{{}} + + +
    +JSON request + +```json +{ + "policies": { + "allowed-http-methods": [ + { + "action": { + "allowedMethods": [ + "GET", + "PUT", + "POST", + "PATCH", + "DELETE", + "CONNECT", + "OPTIONS", + "TRACE" + ], + "returnCode": 405 + } + } + ] + } +} +``` + +This JSON defines an *Allowed HTTP Methods* policy that specifies which HTTP methods are allowed. The listed methods (`GET`, `PUT`, `POST`, `PATCH`, `DELETE`, `CONNECT`, `OPTIONS`, `TRACE`) are all allowed, and any other methods will return a `405 Method Not Allowed` response code. + +
    + +{{%/tab%}} + +{{%tab name="UI"%}} + +To create an *Allowed HTTP Methods* policy using the web interface: + +1. {{< include "acm/webui-acm-login.md" >}} +2. On the left menu, select **Services**. +3. Select a workspace in the list that contains the API Proxy you want to update. +4. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. +5. On the left menu, select **API Proxy > Advanced > Policies**. +6. On the *Advanced > Policies* page, on the **API Proxy** tab, locate **Allowed HTTP Methods**. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Add Policy**. +7. On the *Allowed HTTP Methods* form, complete the necessary fields: + + - **Allow following HTTP Methods**: Specify the HTTP methods you want to allow. Any methods that aren't included will be blocked. + - **Custom response code for non-matching requests**: Specify the status code to return for blocked methods. The default is `405 Method Not Allowed`. + +8. Select **Add** to apply the policy to the API proxy. +9. Select **Save and Publish** to deploy the configuration to the API Proxy. + +{{%/tab%}} + +{{
    }} diff --git a/content/nms/acm/how-to/policies/api-access-control-lists.md b/content/nms/acm/how-to/policies/api-access-control-lists.md new file mode 100644 index 000000000..109f8a9f2 --- /dev/null +++ b/content/nms/acm/how-to/policies/api-access-control-lists.md @@ -0,0 +1,125 @@ +--- +description: Learn how to protect your upstream TCP application servers by denying/allowing + access from certain client IP addresses, CIDR blocks, client IDs or JWT Claims. +docs: DOCS-950 +tags: +- docs +toc: true +weight: 200 +title: API Access Control Lists +--- + +## Overview + +{{< include "acm/how-to/policies-intro" >}} + +--- + +## Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- API Connectivity Manager is installed, licensed, and running. +- You have one or more Environments with [API Gateway]({{< relref "/nms/acm/getting-started/add-api-gateway" >}}) or [Dev Portal]({{< relref "/nms/acm/getting-started/add-devportal" >}}) clusters. +- You have published one or more [API Gateways or Developer]({{< relref "/nms/acm/getting-started/publish-api-proxy" >}}) + +### How to Access the User Interface + +{{< include "acm/how-to/access-acm-ui" >}} + +### How to Access the REST API + +{{< include "acm/how-to/access-acm-api" >}} + +--- + +## Create ACL IP Restriction Policy + +Take the steps in this section if you would like to deny or allow access to your API Gateways or Developer Portals to specific IP addresses or CIDR blocks with ACL lists. + +{{}} + {{%tab name="UI"%}} + +1. In the API Connectivity Manager user interface, go to **Services > \**, where "your workspace" is the workspace that contains the API Proxy. +1. Select **Edit Proxy** from the Actions menu for the desired API Proxy. +1. On the **Policies** tab, select **Add Policy** from the **Actions** menu. +1. Provide the desired **Allowed IP Addresses** and/or **Denied IP Addresses**. Valid values include IPv4, IPv6, and CIDR blocks. 
To allow or deny all, use the * symbol. + + + {{%/tab%}} + {{%tab name="API"%}} + +```json +"policies": { + "acl-ip": [ + { + "action": { + "deny": ["*"], // Polulate this array with your denied IP addresses + "allow": ["10.0.0.1"] + } + } + ] + } +``` + + {{%/tab%}} +{{}} + +{{< note >}} + +- If you only set an allow list, then the deny list will default to deny all and vice versa. +- If IP addresses are not explicitly allowed they will be denied. To allow IP addresses as default, include the `*` symbol in the allow list. +- The most specific rule applied will be used to allow or deny traffic. For example, IP addresses take priority over CIDR blocks. Smaller CIDR blocks take priority over larger ones. +{{< /note >}} + + + +### Verification + +1. Attempt to contact the API Gateway or Developer Portal from a denied IP address. The host should return the default `403 Forbidden` return code or the custom return code you have set. +1. Contact the IP address from an allowed IP address. The traffic should not be denied. + +## Create ACL Consumer Restriction Policy + +Specific consumer client IDs or token claims can be denied or allowed access to your API Gateways or Developer Portals by following the steps in this section. + +{{}} + {{%tab name="UI"%}} + +1. In the API Connectivity Manager user interface, go to **Services > \**, where "your workspace" is the workspace that contains the API Gateway or Dev Portal. +1. Select **Edit Advanced Config** from the **Actions** menu for the desired API Gateway or Dev Portal. +1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for the **ACL Consumer Restriction Policy**. +1. Set the **lookupVariable**. To route based on either the **APIKey Authentication** or **Basic Authentication**, use "client.id" to limit the user based on client ID. For a token-based policy such as **JSON Web Token Assertion** or **OAuth2 Introspection**, you should use "token.{claimKey}. 
For example: "token.sub" would use the sub claim of a JWT Token. +1. Provide the desired **Allowed List** and/or **Denied List**. + + {{%/tab%}} + {{%tab name="API"%}} + +```json +"policies": { + "acl-consumer": [ + { + "action": { + "lookupVariable": "client.id", + "allow": ["allowed-user"], + "deny": ["denied-user"] + } + } + ] + } +``` + + {{%/tab%}} + +{{}} + +{{< note >}} + +- If you only set an allow list, then the deny list will default to deny all and vice versa. +- If values are not allowed, they will be denied by default if neither list contains a wildcard. + {{< /note >}} + +### Verification + +1. Attempt to contact the API Gateway or Developer Portal from a denied using a client that has been denied. The host should return the default `403 Forbidden` return code. +1. Attempt to contact the API Gateway or Developer Portal from an allowed client. The traffic should should be successfully proxied. diff --git a/content/nms/acm/how-to/policies/apikey-authn.md b/content/nms/acm/how-to/policies/apikey-authn.md new file mode 100644 index 000000000..b0890971e --- /dev/null +++ b/content/nms/acm/how-to/policies/apikey-authn.md @@ -0,0 +1,135 @@ +--- +description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to secure + API Gateways by applying an API key authentication policy. +docs: DOCS-1117 +doctypes: +- API Connectivity Manager +- api management +- concept +tags: +- docs +toc: true +weight: 400 +title: API Key Authentication +--- + +## Overview + +{{< include "acm/how-to/policies-proxy-intro" >}} + +--- + +## API Key Authentication + +{{< warning >}} API key authentication is recommended for test environments only. For production environments, consider a more robust authentication method. {{< /warning >}} + +Authentication & authorization policies allow a user to restrict access to their APIs by determining the caller's identity and access level. 
There are several API Gateway authentication/authorization policy types supported by API Connectivity Manager: API key authentication, basic authentication, OAuth2 JWT assertion, and OAuth2 token introspection. This guide focuses specifically on API key authentication. + +An API key is usually a long, pseudo-random string included in the request header or request URL. It is a shared secret between the API client and the API gateway. The server allows the client to access data only after the client authenticates the API key. + +API Connectivity Manager API owners can restrict access to their APIs with API keys. The API Proxy Policy can be configured to grant access to APIs only after verifying that the API Key is valid. + +--- + +## Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- API Connectivity Manager is installed, licensed, and running. +- You have one or more Environments with an [API Gateway]({{< relref "/nms/acm/getting-started/add-api-gateway" >}}). +- You have published one or more [API Gateways]({{< relref "/nms/acm/getting-started/publish-api-proxy" >}}) + +### How to Access the User Interface + +{{< include "acm/how-to/access-acm-ui" >}} + +### How to Access the REST API + +{{< include "acm/how-to/access-acm-api" >}} + +--- + +## Create an API Key Authentication Policy + +Take the steps in this section if you want to restrict access to APIs to clients with a valid API key. You can set up an API key authentication policy using either the web interface or the REST API. + +{{}} +{{%tab name="API"%}} + +Send a POST request to add the API key authentication policy to the API Proxy. 
+ + +{{}} + +| Method | Endpoint | +|----------|---------------------------------------------------------| +| `POST` | `/services/workspaces//proxies` | + +{{}} + + +{{< note >}} To include sensitive data in Proxy `GET` requests, provide the query parameter `includes=sensitivedata`; otherwise, the response will have this data redacted. {{< /note >}} + +```json +{ + "policies": { + "apikey-authn": [ + { + "action": { + "apiKeyName": "apikey", + "suppliedIn": "header", + "credentialForward": false, + "errorReturnConditions": { + "notSupplied": { + "returnCode": 401 + }, + "noMatch": { + "returnCode": 403 + } + } + }, + "data": [ + { + "clientID": "clientA", + "apiKey": "5ff229f7d64e4d6" + }, + { + "clientID": "clientB" + } + ] + } + ] + } +} +``` + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values | Description | Required | Default value | +|------------------------------------------------------------|----------|----------------------------|-----------------------------------------------------------------------------------------------------------------------|----------|---------------| +| `apiKeyName` | string | Example: `clientAPIKey` | The name of the header or query parameter where the API key will be located in the API request. | No | `apikey` | +| `suppliedIn` | string | One of `["HEADER","QUERY"]`| How the API key will be supplied by the consumer of the API via HTTP request. | No | `HEADER` | +| `credentialForward` | boolean | `true/false` | If the API key credential is proxy-forwarded to the backend service in the HTTP header or query parameters. | No | `False` | +| `errorReturnConditions`
    `.notSupplied`
    `.returnCode` | int | In range `400-599` | The error code that is returned from the API Proxy when an API key is not supplied. | No | `401` | +| `errorReturnConditions`
    `.noMatch`
    `.returnCode` | int | In range `400-599` | The error code that is returned from the API Proxy when the supplied API key does not match a key configured for API access. | No | `403` | +| `data.clientID` | string | Example: `ClientA` | Identifies the client who is holding the API Key. | Yes | N/A | +| `data.apiKey` | string | Example: `5ff229f7d64e4d6` | The value of the API Key used to access the API. If an API Key is not provided, a random 32-byte key will be created. | No | N/A | + +{{< /bootstrap-table >}} + + +{{%/tab%}} +{{%tab name="UI"%}} + +1. In the API Connectivity Manager user interface, go to **Services > \{your workspace}**, where "your workspace" is the workspace that contains the API Proxy. +2. Select **Edit Proxy** from the **Actions** menu for the desired API Proxy. +3. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **API Key Authentication**. +4. Provide the **API Key name** if different from the default value `apikey` and if the key should be provided in the request **Header** or as a **Query** parameter. +5. Set custom error return code conditions if an API Key is **not supplied** or **does not match** a key configured for API access. +6. By default, NGINX will strip the API key from the request headers before forwarding the request to the backend service. To preserve the API key header, enable the toggle for **Forward credentials to backend service**. +7. Configure the associated **Client ID** and **API Key** for each client that requires API access. If an **API Key** is not provided, a random 32-byte key will be created. Repeat this process for all clients. +8. Select **Add** to apply the API key authentication policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. + +{{%/tab%}} +{{
    }} diff --git a/content/nms/acm/how-to/policies/basic-authn.md b/content/nms/acm/how-to/policies/basic-authn.md new file mode 100644 index 000000000..42b2bae02 --- /dev/null +++ b/content/nms/acm/how-to/policies/basic-authn.md @@ -0,0 +1,125 @@ +--- +description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to secure + API Gateways by applying a basic authentication policy. +docs: DOCS-1118 +doctypes: +- API Connectivity Manager +- api management +- concept +tags: +- docs +toc: true +weight: 450 +title: Basic Authentication +--- + +## Overview + +{{< include "acm/how-to/policies-proxy-intro" >}} + +--- + +## Basic Authentication + +{{< warning >}} Basic authentication is recommended for test environments only. For production environments, consider a more robust authentication method. {{< /warning >}} + +Authentication & authorization policies allow a user to restrict access to their APIs by determining the caller's identity and access level. There are several API Gateway authentication/authorization policy types supported by API Connectivity Manager: API key authentication, basic authentication, OAuth2 JWT assertion, and OAuth2 token introspection. This guide focuses specifically on basic authentication. + +Basic authentication is a method for HTTP users to provide a username and password when making an API request. In basic HTTP authentication, a request contains a header field in the form of `Authorization: Basic `, where credentials is the Base64 encoding of username and password joined by a single colon. + +API Connectivity Manager API owners can restrict access to their APIs with usernames and passwords. The API Proxy Policy can be configured to grant access to APIs only after verifying that the username and password are valid. + +--- + +## Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- API Connectivity Manager is installed, licensed, and running. 
+- You have one or more Environments with an [API Gateway]({{< relref "/nms/acm/getting-started/add-api-gateway" >}}). +- You have published one or more [API Gateways]({{< relref "/nms/acm/getting-started/publish-api-proxy" >}}) + +### How to Access the User Interface + +{{< include "acm/how-to/access-acm-ui" >}} + +### How to Access the REST API + +{{< include "acm/how-to/access-acm-api" >}} + +--- + +## Create a Basic Authentication Policy + +Take the steps in this section if you want to restrict access to APIs to clients with a valid username and password. You can set up a basic authentication policy using either the web interface or the REST API. + +{{}} +{{%tab name="API"%}} + +Send a `POST` request to add the basic authentication policy to the API Proxy. + + +{{}} + +| Method | Endpoint | +|----------|---------------------------------------------------------| +| `POST` | `/services/workspaces//proxies` | + +{{}} + + +{{< note >}} To include sensitive data in Proxy `GET` requests, provide the query parameter `includes=sensitivedata`; otherwise, the response will have this data redacted. {{< /note >}} + +```json +{ + "policies": { + "basic-authn": [ + { + "action": { + "credentialForward": false, + "errorReturnConditions": { + "notSupplied": { + "returnCode": 401 + } + } + }, + "data": [ + { + "clientID": "ClientA", + "username": "UserA", + "password": "secret123" + } + ] + } + ] + } +} +``` + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values | Description | Required | Default value | +|------------------------------------------------------------|----------|----------------------|------------------------------------------------------------------------------------------------------|----------|---------------| +| `credentialForward` | boolean | `true/false` | If the basic auth credentials are proxy-forwarded to the backend service in the HTTP header. | No | `False` | +| `errorReturnConditions`
    `.notSupplied`
    `.returnCode` | int | In range `400-599` | The error code that is returned from the API Proxy when basic auth credentials are not supplied. | No | `401` | +| `data.clientID` | string | Example: `ClientA` | Identifies the client who is holding the basic authentication credentials. | Yes | N/A | +| `data.username` | string | Example: `UserA` | The value of the client's username. | Yes | N/A | +| `data.password` | string | Example: `secret123` | The value of the client's password. | Yes | N/A | + +{{< /bootstrap-table >}} + + +{{%/tab%}} +{{%tab name="UI"%}} + +1. In the API Connectivity Manager user interface, go to **Services > \{your workspace}**, where "your workspace" is the workspace that contains the API Proxy. +2. Select **Edit Proxy** from the **Actions** menu for the desired API Proxy. +3. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **Basic Authentication**. +4. By default, NGINX will strip the basic authentication credentials from the request headers before forwarding the request to the backend service. To preserve the credentials, enable the toggle for **Forward credential**. +5. Set custom error return code conditions if basic authentication credentials are **not supplied**. +6. Configure the associated **Client ID**, **Username**, and **Password** for each client that requires API access. +7. Select **Add** to apply the basic authentication policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. + +{{%/tab%}} +{{
    }} diff --git a/content/nms/acm/how-to/policies/cluster-wide-config.md b/content/nms/acm/how-to/policies/cluster-wide-config.md new file mode 100644 index 000000000..a9acf0910 --- /dev/null +++ b/content/nms/acm/how-to/policies/cluster-wide-config.md @@ -0,0 +1,170 @@ +--- +description: Learn how to configure the Cluster-Wide Config settings to fine tune + and control proxy cluster's behavior with performance enhancing configurations. +docs: DOCS-1160 +doctypes: +- API Connectivity Manager +- api management +- concept +tags: +- docs +title: Cluster-Wide Config +toc: true +weight: 498 +--- + +## Overview + +{{< include "acm/how-to/policies-proxy-cluster-intro" >}} + +--- + +## About the Policy + +Use the *Cluster-Wide Config* settings to fine tune the worker connections, [hash table size](https://nginx.org/en/docs/hash.html), and keepalive settings to speed up data processing and improve the performance of the API proxy for large number of connections. When applied, the settings are applicable to all the instances in a proxy cluster. If the proxy cluster is shared between environments, the changes made in any environment will be reflected in all the other environments. + +### Intended Audience + +{{< include "acm/how-to/policies/infra-admin-persona.md">}} + +--- + +## Workflow for Applying Policy + +To apply the policy or make changes to it, here's what you need to do: + +- Create an environment or edit an existing one. +- Check the cluster config settings for the environment to see if the policy has been applied. +- Edit the policy to make changes for each cluster. Save and publish the changes. + +--- + +## Policy Settings {#policy-settings} + +The following table lists the configurable settings and their default values for the policy. 
+ + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Datatype | Possible Values | Description | Required | Default | +|--------------|----------|---------------------|----------------------------------------------------|----------|-----------------------| +| `mapHashBucketSize` | integer | example: 256 | Sets the bucket size for the map hash table. | No | 128 | +| `mapHashMaxSize` | integer | example: 2048 | Sets the maximum bucket size for the map hash table. | No | 2048 | +| `serverNamesHashBucket` | integer | example: 256 | Sets the bucket size for the server names hash tables | No | 256 | +| `serverNamesHashMaxSize` | integer | example: 1024 | Sets the maximum size of the server names hash tables. | No | 1024 | +| `workersConfig.connections` | integer | In range `256–65536` | Sets the maximum number of simultaneous connections that can be opened by a worker process. | No | 8192 | +| `workersConfig.maxProcesses` | string | `^(auto\|[1-9]\|[1-2][0-9]\|3[0-2])$` | Defines the number of worker processes. | No | auto | +| `workersConfig.maxLimitForOpenFile` | integer | In range `512–262144` | Changes the limit on the maximum number of open files (RLIMIT_NOFILE) for worker processes. Used to increase the limit without restarting the main process. | No | 20000 | +| `clientConnection.keepaliveTimeout` | string | ^([0-9]+)(([h\|m\|s]){1})$ | The first parameter sets a timeout during which a keep-alive client connection will stay open on the server side. | No | 75s | +| `clientConnection.keepaliveHeaderTimeout` | string | ^([0-9]+)(([h\|m\|s]){1})$ | Sets the value advertised in the `Keep-Alive: timeout=` response header (the optional second parameter of the NGINX `keepalive_timeout` directive). | No | | +| `clientConnection.keepaliveRequests` | integer | In range `50–20000` | Sets the maximum number of requests that can be served through one keepalive connection. | No | 1000 | +| `clientConnection.keepaliveTime` | string | ^([0-9]+)(([h\|m\|s]){1})$ | Maximum time during which requests can be processed through one keepalive connection. 
| No | "1h" | +| `clientHeaderBuffer.size` | string | ([.\d]+)(?:M\|K) | Sets the maximum size of buffers used for reading a large client request header. | No | 8K | +| `clientHeaderBuffer.number` | integer | In range `1–64` | Sets the maximum number of buffers used for reading a large client request header. | No | 4 | +| `clientHeaderBuffer.timeout` | string | ^[0-9]+[h\|m\|s]{1}$ | Defines a timeout for reading client request header. | No | "60s" | + +{{< /bootstrap-table >}} + + +--- + +## Updating Cluster-Wide Policy + +{{}} + +{{%tab name="API"%}} + +{{}}{{< include "acm/how-to/access-acm-api.md" >}}{{}} + +To update the Cluster-Wide Config settings using the REST API, send an HTTP `PUT` request to the Proxy Clusters endpoint. + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Method | Endpoint | +|--------|---------------------| +| `PUT` | `/infrastructure/workspaces/{infraWorkspaceName}/proxy-clusters/{clusterName}`| + +{{}} + + +
    +JSON request - Cluster-Wide Config with minimum configuration + +``` json +{ + "policies": { + "cluster-wide-config": [], + } +} +``` + +
    + +
    +JSON request - Cluster-Wide Config with all options specified + +``` json +{ + "policies": { + "cluster-wide-config": [ + { + "action": { + "clientConnection": { + "keepaliveRequests": 1000, + "keepaliveTime": "1h", + "keepaliveTimeout": "75s" + }, + "clientHeaderBuffer": { + "number": 4, + "size": "8K", + "timeout": "60s" + }, + "mapHashBucketSize": 128, + "mapHashMaxSize": 2048, + "serverNamesHashBucket": 256, + "serverNamesHashMaxSize": 1024, + "workersConfig": { + "connections": 8192, + "maxLimitForOpenFile": 20000, + "maxProcesses": "auto" + } + }, + } + ], + } +} +``` + +
    + +{{%/tab%}} + +{{%tab name="UI"%}} + +To create a Cluster-Wide Config setting using the web interface: + +1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. +2. On the left menu, select **Infrastructure**. +3. Choose the workspace that contains your cluster's environment from the list of workspaces. +4. In the **Environments** section, select the environment name for your cluster. +5. In the list of clusters, locate the cluster you want to add the policy to. On the **Actions** menu (represented by an ellipsis, `...`), select **Edit Cluster Config**. +6. On the left menu, select **Cluster Policies**. +7. In the list of Cluster Policies, the Cluster-Wide Config setting should be enabled by default. To edit the policy, select the ellipsis icon (`...`), then select **Edit Cluster Config**. +8. Customize the policy settings to suit your requirements. Refer to the [Policy Settings](#policy-settings) section for an overview of the available options and their possible configurations. +9. Select **Save** to save the changes. +10. Select **Save and Submit** to publish the policy changes to the environment. + +{{%/tab%}} + +{{
    }} + +--- + +## Verify the Policy + +Confirm that the policy has been set up and configured correctly by taking these steps: + +- Verify the NGINX configuration was applied by this policy. + + diff --git a/content/nms/acm/how-to/policies/cluster-zone-sync.md b/content/nms/acm/how-to/policies/cluster-zone-sync.md new file mode 100644 index 000000000..e1da295ee --- /dev/null +++ b/content/nms/acm/how-to/policies/cluster-zone-sync.md @@ -0,0 +1,419 @@ +--- +description: Learn how to configure the Cluster Zone Sync policy to enable runtime + state sharing between the instances belonging to a proxy cluster. +docs: DOCS-1159 +doctypes: +- API Connectivity Manager +- api management +- concept +tags: +- docs +title: Cluster Zone Sync +toc: true +weight: 499 +--- + +## Overview + +{{< include "acm/how-to/policies-proxy-cluster-intro" >}} + +--- + +## About the Policy + +Use the *Cluster Zone Sync* policy to enable runtime state sharing between the instances belonging to a proxy cluster. Options configured through this policy affect other policies such as rate limit and OIDC. This policy is applied to all the instances in a proxy cluster. If the proxy cluster is shared between environments, any changes made to this policy will affect all the other environments. + +### Intended Audience + +{{< include "acm/how-to/policies/infra-admin-persona.md">}} + +--- + +## Workflow for Applying Policy + +To apply the policy or make changes to it, here's what you need to do: + +- Create an environment or edit an existing one. +- Check the cluster config settings for the environment to see if the policy has been applied. +- Edit the policy to make changes for each cluster. Save and publish the changes. + +{{< note >}} +We strongly recommend securing your Zone Sync environment by enabling TLS for your listeners and Zone Sync TLS verification for the policy. To do this, you'll need to provide server certificates, as well as Zone Sync certificates and CA certs. 
+ +When adding a new instance to a cluster with the Zone Sync policy applied, make sure the instance is resolvable by DNS if a DNS server is used, or that the Zone Sync Server list is updated to include the instance if the list is provided manually. + +Similarly, when removing an instance from a cluster with the Zone Sync policy applied, be sure to do the necessary clean-up in the DNS resolver or the Zone Sync Server list. +{{< /note >}} + +--- + +## Policy Settings + +The following table lists the configurable settings and their default values for the policy. + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Datatype | Possible Values | Description | Required | Default | +|--------------|----------|---------------------|----------------------------------------------------|----------|-----------------------| +| `tcpServer.listeners[].transportProtocol` | string | ["TCP"] | Stream listener to configure protocol for zone sync stream. | No | "TCP" | +| `tcpServer.listeners[].port` | integer | In range `1-65535` | Stream listener to configure port for zone sync stream. | Yes | | +| `tcpServer.listeners[].enableTLS` | boolean | false/true | Stream listener to enable TLS for zone sync stream. | No | false | +| `tcpServer.listeners[].ipv6` | boolean | false/true | Stream listener to enable ipv6 for zone sync stream. | No | false | +| `tcpServer.hostnames` | array | | Configure hostnames | No | [] | +| `tcpServer.tlsCipher` | string | | Specifies the enabled ciphers. The ciphers are specified in the format understood by the OpenSSL library | No | ECDH+AESGCM:ECDH+AES256:ECDH+AES128:DH+3DES:!ADH:!AECDH:!MD5 | +| `tcpServer.tlsSessionCache.enable` | string | [ "on", "off", "none" ] | Specifies session parameters to avoid SSL handshakes for parallel and subsequent connections. 
| No | "on" | +| `tcpServer.tlsSessionCache.type` | string | [ "shared", "builtin" ] | Specifies session parameters to avoid SSL handshakes for parallel and subsequent connections. | No | "shared" | +| `tcpServer.tlsSessionCache.size` | string | ^([0-9]+)(([K\|M\|G]){1})$ | Maximum size of the Cache. Valid units are: K, M, G for kilobytes, megabytes, and gigabytes, respectively. | No | "10M" | +| `tcpServer.tlsProtocols` | array | ["TLSv1.1", "TLSv1.2", "TLSv1.3"] | Enables the specified protocols. | No | [ "TLSv1.2" ] | +| `tcpServer.tlsSessionTimeout` | string | ^([0-9]+)(([d\|h\|m\|s]){1})$ | Specifies cache timeout. Valid units are: s, m, h and d for seconds, minutes, hours, and days respectively. | No | "5m" | +| `zoneSyncServers[].hostname` | array | | Defines the address of a cluster node. The address can be specified as a domain name or IP address. A domain name that resolves to several IP addresses defines multiple nodes at once. | Yes | | +| `zoneSyncServers[].port` | array | | Defines the address of a cluster node. The address can be specified as a domain name or IP address. A domain name that resolves to several IP addresses defines multiple nodes at once. | Yes | | +| `enableZoneSyncTLS` | boolean | false | Enables the TLS protocol for connections to another cluster server. When this is enabled certificates need to be provided in the data section of the policy. | No | System assigned | +| `enableZoneSyncCertVerify` | boolean | false | Enables the TLS verification for connections to another cluster server. When this is enabled certificates need to be provided in the data section of the policy. | No | System assigned | +| `zoneSyncCertChainVerifyDepth` | integer | 1 | Sets the verification depth for another cluster server certificates chain. | No | System assigned | +| `zoneSyncEnableSNI` | boolean | false | Enables or disables passing of the server name through TLS Server Name Indication (SNI) when establishing a connection with another cluster server. 
| No | System assigned | +| `zoneSyncTLSName` | string | In range `1–110` | Allows overriding the server name used to verify the certificate of a cluster server and to be passed through SNI when establishing a connection with the cluster server. | No | | +| `zoneSyncBuffers.number` | integer | 1–128 | Configure size and number of per-zone buffers used for pushing zone contents. A single buffer must be large enough to hold any entry of each zone being synchronized. | No | 8 | +| `zoneSyncBuffers.size` | string | ^[0-9]+[K\|M\|G]{1}$ | Configure size and number of per-zone buffers used for pushing zone contents. A single buffer must be large enough to hold any entry of each zone being synchronized. | No | "8k" | +| `zoneSyncConnectionRetryInterval` | string | ^[0-9]+[h\|m\|s]{1}$ | Defines an interval between connection attempts to another cluster node. | No | "1s" | +| `zoneSyncConnectionTimeout` | string | ^[0-9]+[h\|m\|s]{1}$ | Defines a timeout for establishing a connection with another cluster node. | No | "5s" | +| `zoneSyncInterval` | string | ^[0-9]+[h\|m\|s]{1}$ | Defines an interval for polling updates in a shared memory zone. | No | "1s" | +| `zoneSyncTimeout` | string | ^[0-9]+[h\|m\|s]{1}$ | Sets the timeout between two successive read or write operations on connection to another cluster node. | No | "1s" | + +{{< /bootstrap-table >}} + + +--- + +## Adding Cluster Zone Sync Policy + +{{}} + +{{%tab name="API"%}} + +{{}}{{< include "acm/how-to/access-acm-api.md" >}}{{}} + +To create a Cluster Zone Sync policy using the REST API, send an HTTP `PUT` request to the Proxy Clusters endpoint. + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Method | Endpoint | +|--------|---------------------| +| `PUT` | `/infrastructure/workspaces/{infraWorkspaceName}/proxy-clusters/{clusterName}` | + +{{}} + + +
    +JSON request - Cluster Zone Sync with minimum configuration + +``` json +{ + "cluster-zone-sync": [ + { + "action": { + "tcpServer": { + "listeners": [ + { + "port": 12345 + } + ] + }, + "zoneSyncServers": [ + { + "name": "nginx-cluster-instance-1.com", + "port": 12345 + }, + { + "name": "nginx-cluster-instance-2.com", + "port": 12345 + } + ] + } + } + ] +} +``` + +
    + +
    +JSON request - Cluster Zone Sync with DNS Resolver + +``` json +{ + "cluster-zone-sync": [ + { + "action": { + "tcpServer": { + "listeners": [ + { + "port": 12345 + } + ], + }, + "resolver": { + "enableIPv6": false, + "valid": "30s", + "timeout": "5s", + "servers": [ + { + "hostname": "192.0.2.0" + } + ] + }, + "zoneSyncServers": [ + { + "name": "nginx-cluster.com", + "port": 12345 + } + ] + } + } + ] +} +``` + +
    + +
    +JSON request - Cluster Zone Sync with DNS Resolver and TCP Server TLS enabled + +``` json +{ + "cluster-zone-sync": [ + { + "action": { + "tcpServer": { + "listeners": [ + { + "port": 12345, + "tlsEnabled": true + } + ], + }, + "resolver": { + "enableIPv6": false, + "valid": "30s", + "timeout": "5s", + "servers": [ + { + "hostname": "192.0.2.0" + } + ] + }, + "zoneSyncServers": [ + { + "name": "nginx-cluster.com", + "port": 12345 + } + ] + }, + "data": { + "serverCerts": [ + { + "key": "", + "cert": "" + } + ] + } + } + ] +} + +``` + +
    + +
    +JSON request - Cluster Zone Sync with secure TLS between nodes in a cluster + +``` json +{ + "cluster-zone-sync": [ + { + "action": { + "tcpServer": { + "listeners": [ + { + "port": 12345, + "tlsEnabled": true + } + ], + }, + "resolver": { + "enableIPv6": false, + "valid": "30s", + "timeout": "5s", + "servers": [ + { + "hostname": "192.0.2.0" + } + ] + }, + "enableZoneSyncTLS": true, + "zoneSyncServers": [ + { + "name": "nginx-cluster.com", + "port": 12345 + } + ] + }, + "data": { + "serverCerts": [ + { + "key": "", + "cert": "" + } + ], + "zoneSyncCerts": [ + { + "key": "", + "cert": "" + } + ], + "zoneSyncTrustedCACert": "" + } + } + ] +} + +``` + +
    + +
    +JSON request - Cluster Zone Sync with all options specified + +``` json +{ + "cluster-zone-sync": [ + { + "action": { + "tcpServer": { + "listeners": [ + { + "port": 12345, + "tlsEnabled": true + } + ], + "hostnames": ["10.0.0.9"], + "tlsProtocols": ["TLSv1.1", "TLSv1.2", "TLSv1.3"], + "tlsCipher": "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384", + "tlsSessionCache": { + "enable": "none" + }, + "tlsSessionTimeOut": "15m" + }, + "resolver": { + "enableIPv6": false, + "valid": "30s", + "timeout": "5s", + "servers": [ + { + "hostname": "192.0.2.0" + } + ] + }, + "enableZoneSyncTLS": true, + "enableZoneSyncCertVerify": true, + "zoneSyncCertChainVerifyDepth": 2, + "zoneSyncEnableSNI": true, + "zoneSyncTLSServerName": "custom-sni-host.com", + "zoneSyncServers": [ + { + "name": "nginx-cluster.com", + "port": 12345 + } + ], + "syncBuffers": { + "number": 10, + "size": "8k" + }, + "connectionRetryInterval": "8s", + "connectionTimeout": "10m", + "timeout": "5s", + "interval": "1s" + }, + "data": { + "serverCerts": [ + { + "key": "", + "cert": "" + } + ], + "zoneSyncCerts": [ + { + "key": "", + "cert": "" + } + ], + "zoneSyncTrustedCACert": "" + } + } + ] +} + +``` + +
    + +{{%/tab%}} + +{{%tab name="UI"%}} + +To create a Cluster Zone Sync policy using the web interface: + +1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. +2. On the left menu, select **Infrastructure**. +3. Choose the workspace that contains your cluster's environment from the list of workspaces. +4. In the **Environments** section, select the environment name for your cluster. +5. In the list of clusters, locate the cluster you want to add the policy to. On the **Actions** menu (represented by an ellipsis, `...`), select **Edit Cluster Config**. +6. On the left menu, select **Cluster Policies**. +7. Locate the **Cluster Zone Sync** policy in the list of policies. On the **Actions** menu (represented by an ellipsis, `...`), select **Add Policy**. +8. On the **Cluster Zone Sync** form, complete the necessary fields: + + - **TLS Server Settings - Port**: Specify port for zone sync stream server. + - **Zone Sync Settings - hostname**: Enter the address of a cluster node. The address can be specified as a domain name or IP address. A domain name that resolves to several IP addresses defines multiple nodes at once. +9. Select **Add** to apply the policy to the cluster. +10. Select **Save and Submit** to deploy the configuration. + +{{%/tab%}} + +{{
    }} + +--- + +## Verify the Policy + +Confirm that the policy has been set up and configured correctly by taking these steps: + +- Verify OIDC KeyValue Zone Sync is synchronized between instances within a cluster. +- Verify no OIDC session issues are presented when using multiple instances in a cluster. +- Verify applied rate limit for a proxy in a cluster is synchronized between instances within a cluster. + +--- + +## Troubleshooting + +For help resolving common issues when setting up and configuring the policy, follow the steps in this section. If you cannot find a solution to your specific issue, reach out to [NGINX Customer Support]({{< relref "/nms/support/contact-support.md" >}}) for assistance. + +### Issue 1 + +When the runtime state is not syncing between the instances in a desired proxy cluster. + +Resolution/Workaround: + +1. Ensure the tcp listener port of each instance is accessible within the desired proxy cluster. +2. By default the tcp listener port is open for all, but if you've provided tcp hostnames, then ensure the desired hostnames are resolvable. + +### Issue 2 + +If you see errors in the NGINX logs related to TLS when TLS is enabled (`enableZoneSyncTLS` is set to `true`) and zone sync servers are IP addresses, try the following workarounds: + +Resolution/Workaround: + +1. Ensure that you override the default server name (`zoneSyncTLSName`) used to verify the certificate of the desired cluster server to match the Subject Alternative Name of the cert provided. +2. If you are using DNS for zone sync servers, make sure you use the correct certificate that matches the hostname provided under zone sync server option (`zoneSyncServers[].hostname`). 
diff --git a/content/nms/acm/how-to/policies/cors.md b/content/nms/acm/how-to/policies/cors.md new file mode 100644 index 000000000..f38458149 --- /dev/null +++ b/content/nms/acm/how-to/policies/cors.md @@ -0,0 +1,139 @@ +--- +description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to handle + Cross-Origin Resource Sharing for your backend services. +docs: DOCS-1130 +doctypes: +- API Connectivity Manager +- api management +- reference +tags: +- docs +title: CORS +toc: true +weight: 500 +--- + +## Overview + +{{< include "acm/how-to/policies-intro" >}} + +--- + +## About the Policy + +The CORS policy allows users to configure API Gateways to set the required headers to allow Cross-Origin Resource Sharing (CORS). CORS is a series of headers instructing web browsers which origins should be permitted to load resources other than the API Gateway origin. + +### Intended Audience + +{{< include "acm/how-to/policies/api-owner-persona.md">}} + +--- + +## Before You Begin + +To complete the steps in this guide, you need the following: + +- API Connectivity Manager is installed, licensed, and running. +- An [API gateway environment]({{< relref "/nms/acm/getting-started/add-api-gateway" >}}) +- A [published API Gateway]({{< relref "/nms/acm/getting-started/publish-api-proxy" >}}) + +--- + +## Policy Settings + + + +The following table lists the configurable settings and their default values for the policy. 
+ + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Datatype | Possible Values | Description | Required | Default | +| ------------------- | ------------ | ---------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | ------------------------------------------------------------------------- | +| `allowCredentials` | boolean | `true`, `false` | When set to `true`, the `Access-Control-Allow-Credentials` header is set to `true` for all responses. | No | `false` | +| `allowHeaders` | string array | Example: `["X-header-name", "Authorization"]` | Used to set the `Access-Control-Allow-Headers` header, which tells the browser which headers can be used in the request. | No | `["Authorization", "Origin", "Content-Type", "Accept", "X-Cache-Status"]` | +| `allowMethods` | string array | `["GET", "HEAD", "PUT", "PATCH", "POST", "DELETE", "OPTIONS", "TRACE", "CONNECT"]` | Used to set the `Access-Control-Allow-Methods` header, which tells the browser which methods can be used in the request. | No | `["GET", "HEAD", "OPTIONS"]` | +| `allowOrigins` | Origin array | Example: `[{"exact":"example1.com"},{"exact":"example2.com"}]` | Used to set the `Access-Control-Allow-Origins` header, which tells the browser which origins can make a request. If set to `[{"exact":"*"}]` all origins will be accepted. | No | `[{"exact":"*"}]` | +| `exposedHeaders` | string array | Example: `[ "header-name", "x-correlation-id", "*" ]` | Used to set the `Access-Control-Expose-Headers` header, which tells the browser which headers can be accessed in the response. 
| No | `[]` | +| `maxAge` | integer | 5–60000 | Used to set the `Access-Control-Max-Age` header, which tells the browser what is the maximum length of time in seconds that preflight requests can be cached | No | N/A | +| `preflightContinue` | boolean | `true`, `false` | When set to `true`, preflight requests are proxied to the backend service. Otherwise, they are handled by the API Gateway. | No | `false` | + +{{< /bootstrap-table >}} + + +{{< note >}} +Setting a wildcard (`*`) in `exposedHeaders` does not include headers related to `Access-Control-Allow-Credentials`; those must explicitly be added to exposed headers. +{{< /note >}} + +--- + +## Adding the CORS Policy + +{{}} + +{{%tab name="API"%}} + +{{}}{{< include "acm/how-to/access-acm-api.md" >}}{{}} + +To create a CORS policy using the REST API, send an HTTP `POST` request to the Proxies endpoint. + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Method | Endpoint | +| ------ | ------------------------------------------------------- | +| `POST` | `/services/workspaces/{SERVICE_WORKSPACE_NAME}/proxies` | + +{{}} + + +
    +JSON request + +``` json +{ + "policies": { + "cors": [ + { + "action": { + "allowCredentials": true, + "allowMethods": [ + "GET", "HEAD", "PUT", "PATCH", "POST" + ], + "allowOrigins": [ + { + "exact": "example.com" + } + ], + "exposedHeaders": [ + "header-name", "x-correlation-id" + ], + "maxAge": 30000 + } + } + ] + } +} +``` + +
    + +{{%/tab%}} + +{{%tab name="UI"%}} + +1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. +2. On the left menu, select **Services**. +3. Select a workspace in the list that contains the API Proxy you want to update. +4. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. +5. On the left menu, select **API Proxy > Advanced > Policies**. +6. On the *Advanced > Policies* page, on the **API Proxy** tab, locate **CORS**. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Add Policy**. +7. Modify the CORS configuration as needed. +8. Select **Save** to apply the policy to the API Proxy. +9. Select **Save and Publish** to deploy the configuration to the API Proxy. + +{{%/tab%}} + +{{
    }} + +--- diff --git a/content/nms/acm/how-to/policies/error-response-format.md b/content/nms/acm/how-to/policies/error-response-format.md new file mode 100644 index 000000000..aa6f0a760 --- /dev/null +++ b/content/nms/acm/how-to/policies/error-response-format.md @@ -0,0 +1,133 @@ +--- +description: Learn how to use the F5 NGINX Management Suite API Connectivity Manager + to configure the Error Response Format policy that customizes HTTP error codes and + messages. +docs: DOCS-1345 +doctypes: +- API Connectivity Manager +- api management +- task +tags: +- docs +title: Error Response Format +toc: true +weight: 550 +--- + +## Overview + +{{< include "acm/how-to/policies-intro" >}} + +--- + +## About the Policy + +This policy specifies how the API gateway will intercept HTTP errors from the backend(s) and respond to the client with a standard or customized error response in JSON format. +The client will receive the Custom Status and Error Message in JSON format, instead of the standard HTTP error coming from the backend. +The Error Response Format policy is applied by default to any new environment. + +### Intended Audience + +{{< include "acm/how-to/policies/infra-admin-persona.md">}} + +--- + +## Workflow for Applying Policy + +To apply the policy or make changes to it, follow these steps: + +- [Edit an existing environment or create a new one]({{< relref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#add-environment" >}}). +- Review the advanced settings for the environment to confirm if the policy has been applied. +- Edit the policy to make changes for each environment. Save and publish the changes. + +--- + +## Policy Settings + +{{< note >}} + +Either `errorMessage` or `errorMessageBody` must be provided for each error code. 
+ +{{< /note >}} + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values | Description | Required | Default Value | +|------------------------|---------|------------------------------------|---------------------------------------------------------------------------------------------------------------------|----------|---------------| +| `errorCode` | int | In range `400-599` | The error code that needs to be used by the NGINX data-plane to return to the user. | Yes | N/A | +| `errorMessage` | string | Max length `2048` | The customized error message that needs to be used by the NGINX data-plane to convey error information. | No | N/A | +| `errorMessageBody` | object | Example: `{"errMsg":"My Message"}` | The customized JSON errors that needs to be used by the NGINX data-plane to convey error information to the user. | No | N/A | + +{{< /bootstrap-table >}} + +--- + +## Applying the Policy + +You can apply this policy using the web interface or the REST API. + +
    + +{{}} + +{{%tab name="API"%}} + +To create an Error Response Format policy using the REST API, send an HTTP `POST` request to the environment endpoint. + +{{}} + +| Method | Endpoint | +|--------|---------------------------------------------------------------------| +| `POST` | `/infrastructure/workspaces/{workspace}/environments/{environment}` | + +{{}} + +
    +JSON request + +```json +{ + "policies": { + "error-response-format": [ + { + "systemMetadata": { + "appliedOn": "inbound", + "context": "global" + }, + "action": { + "400": { + "errorCode": "13", + "errorMessage": "Bad Request" + } + } + } + ] + } +} +``` + +This JSON example defines an Error Response policy. + +
    + +{{%/tab%}} + +{{%tab name="UI"%}} + +To add an Error Response Format policy using the web interface: + +1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. +1. On the left menu, select **Infrastructure**. +1. Choose the workspace that includes the environment for the cluster you want to add the policy to. +1. Select the environment. +1. In the list of clusters, locate the cluster you want to add the policy to. On the **Actions** menu (represented by an ellipsis, `...`), select **Edit Advanced Config**. +1. On the left menu, select **Global Policies**. +1. From the list of policies, locate the **Error Response Format** policy, then select **Add Policy** from the **Actions** menu (represented by an ellipsis, `...`). +1. Configure the associated **Error Code**, **Error Message** and **Error Message Body** for each error code. +1. Additional entries can be created by selecting “Add Error Code” at the bottom of the table. +1. Select **Add** to apply the policy to the cluster. +1. Select **Save and Submit** to deploy the configuration. + +{{%/tab%}} + +{{
    }} diff --git a/content/nms/acm/how-to/policies/grpc-policies.md b/content/nms/acm/how-to/policies/grpc-policies.md new file mode 100644 index 000000000..5e78a0272 --- /dev/null +++ b/content/nms/acm/how-to/policies/grpc-policies.md @@ -0,0 +1,199 @@ +--- +description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to configure + policies for your gRPC API Gateway. +docs: DOCS-1084 +doctypes: +- task +tags: +- docs +toc: true +weight: 600 +title: gRPC +--- + +{{< shortversions "1.3.0" "latest" "acmvers" >}} + +## Overview + +{{< include "acm/how-to/policies-intro.md" >}} + +Refer to the [Set Up Policies]({{< relref "/nms/acm/how-to/policies/manage-policies.md" >}}) topic for instructions on how to configure policies for your API Gateway and Developer Portal clusters and API Proxies. + +--- + +## Global Policies + +### Return Default gRPC Status Codes {#grpc-status-codes} + +The default NGINX error pages are suitable for conventional HTTP traffic. gRPC clients, however, expect [gRPC responses](https://github.com/grpc/grpc/blob/master/doc/statuscodes.md). + +To return default gRPC status codes, send a POST request to the Environments endpoint. + + +{{}} + +| Method | Endpoint | +|--------|---------------------------------------------------------| +| POST | `/infrastructure/workspaces//environments` | + +{{}} + + +
    +Example JSON request + +```json +{ + "name": "{{environmentname}}", + "type": "NON-PROD", + "functions": [ + "API-GATEWAY" + ], + "proxies": [ + { + "proxyClusterName": "{{instanceGroupName}}", + "hostnames": [ + "{{environmentHostname}}" + ], + "runtime": "GATEWAY-PROXY", + "listeners": [ + { + "port": 8085, + "transportProtocol": "GRPC" + } + ], + "policies": { + "error-response-format": [ + { + "action": { + "400": { + "errorCode": "13", + "grpcStatusCode": 5, + "errorMessage": "Bad Request" + } + } + } + ] + } + } + ] +} +``` + +
    + +### Log Format {#grpc-log-format} + +Use the following variables to log gRPC-specific information. These variables are enabled by default for gRPC APIs. + +{{}} + +| Variable | Description | +|---------------|----------------------------------------------------------------------------------------------------------------------| +| `grpcMethod` | The RPC method invoked in the call. | +| `grpcService` | The service; for example, `routeguide.RouteGuide` | +| `grpcStatus` | The gRPC [status code](https://github.com/grpc/grpc/blob/master/doc/statuscodes.md) returned by the upstream server. | +| `grpcMessage` | The `grpc-message` trailer/header | + +{{< /bootstrap-table >}} + +Take note of the following considerations when using these standard log format variables for logging gRPC details: + +- `requestURI` - This is the relative URI of the gRPC method. The HTTP2 `:path` pseudo-header is used for this. +- `timestamp` - For streaming methods, this value reflects when the stream is closed. +- `totalLatency` - For streaming methods, this value reflects the entire duration of the stream. +- `bodySize` - For streaming methods, this value counts all of the bytes sent during the duration of the stream and not for individual messages. + +### Request Body Size Limit + +For streaming methods, the request body size limit is enforced on the entire stream, not per individual message. Therefore, we recommend configuring the limit to be very large or disabling this policy altogether for long-lived streams. + +--- + +## API Proxy Policies + +### Auth Policies + +The following policies involve some degree of header reading and modifying depending on their configuration and work the same with [gRPC metadata](https://grpc.io/docs/what-is-grpc/core-concepts/#metadata): + +- API Key +- Basic Auth +- JWT Assertion +- OAuth2 Introspection + +Select `header` for any policy setting that configures the supplied-in value. 
+ +For example, suppose the `Authorization` header is used for the API Key authentication, and credential forwarding has been enabled. In that case, the following example Go server code can access that value in the metadata as shown below: + +```go +// GetFeature returns the feature at the given point. +func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { +md, _ := metadata.FromIncomingContext(ctx) +fmt.Printf("Authorization: %+v\n", md.Get("Authorization")) +``` + +You can also modify these policies' error return conditions, so they return custom gRPC status codes. + +### Backend Configuration + +There is a separate policy for configuring upstream connection behavior specifically for the gRPC backend service. + +- In the web Interface, select the **Backend Config** policy. +- In the REST API, use the `grpc-backend-config` policy. + +By default, the following actions have a configured timeout of 7 days: + +- Reading client request headers (`client_header_timeout`) +- Reading client request body (`client_body_timeout`) +- Reading a response from the upstream gRPC server (`grpc_read_timeout`) +- Transmitting a request to the upstream gRPC server (`grpc_send_timeout`) + +You can configure this policy to override most of these values. + +### Health Check + +gRPC-specific health checks can be configured for backends that implement the [official protocol](https://github.com/grpc/grpc/blob/master/doc/health-checking.md), as well as those that don't by using an [unimplemented status code](https://docs.nginx.com/nginx/admin-guide/load-balancer/grpc-health-check/#grpc-servers-that-do-not-accept-health-checking-protocol). + +Conventional HTTP-based health checks can also be configured, but they cannot be used alongside gRPC healthchecks. 
+ +### Customize gRPC Status Codes + +You can customize the following policies' [gRPC status code](https://github.com/grpc/grpc/blob/master/doc/statuscodes.md) values: + +- Rate Limit +- API Key +- Basic Auth +- JWT Assertion +- OAuth2 Introspection +- ACL IP +- TLS Inbound +- Request Body Size Limit + +- In the web interface, any policy which contains *Error Handling* properties accepts `grpcStatusCode` rather than HTTP `returnCode`. +- In the API, any policy with the `errorReturnConditions` object that contains a `returnCode` property, or a `returnCode` property at the top level, accepts a `grpcStatusCode` instead of or in addition to `returnCode`. + +
    + Example JSON request + + ```json + "policies": { + "acl-ip": [ + { + "action": { + "allow": ["10.0.0.2"], + "grpcStatusCode": 13 + } + } + ] + } + ``` + +
    + +### Unsupported Policies + +The following policies are not supported at this time: + +- Proxy Cache +- CORS +- Allow HTTP Method diff --git a/content/nms/acm/how-to/policies/health-check.md b/content/nms/acm/how-to/policies/health-check.md new file mode 100644 index 000000000..04e6ebb9e --- /dev/null +++ b/content/nms/acm/how-to/policies/health-check.md @@ -0,0 +1,309 @@ +--- +description: Learn how to create, configure, and implement health check policies for + your HTTP and gRPC API Proxies using F5 NGINX Management Suite API Connectivity Manager. +docs: DOCS-1125 +doctypes: +- reference +tags: +- docs +title: Health Check +toc: true +weight: 610 +--- + +## Overview + +{{< include "acm/how-to/policies-proxy-intro.md" >}} + +## About the Policy + +API Connectivity Manager can configure your API Proxies to continually test your backend service targets (upstream servers), avoid the servers that have failed, and gracefully add the recovered services to a load-balanced group. This continuous testing is also known as "Active Health Checks," whereas "Passive Health Checks" monitor transactions as they occur. + +More information on NGINX health checks can be found at: + +- +- + +### Intended Audience + +{{< include "acm/how-to/policies/api-owner-persona.md">}} + +--- + +## HTTP Health Checks + +### Before You Begin + +To complete the steps in this guide, you need the following: + +- API Connectivity Manager is installed, licensed, and running +- You have [one or more Environments with an API Gateway]({{< relref "/nms/acm/getting-started/add-api-gateway.md" >}}) +- You have [published one or more API Gateways]({{< relref "/nms/acm/getting-started/publish-api-proxy.md" >}}) +- Your backend service(s) has an HTTP health check endpoint and/or can return status codes in the range from 200 through 399 for health check requests. + +### Policy Settings + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Datatype | Possible Values |
    Description
    | Required | Default value | +|----------------------------------------------------------------|----------|----------------------------|-----------------------------------------------------------------------------------------------------------------------|----------|---------------| +| `targetBackendPolicyLabel` | string | Example: `default` | This field is used to target a specific backend by label. | No | `default` | +| `transportProtocol` | string | One of `["http"]` | The transport protocol used by the service. Only http is supported for now. | No | `http` | +| `isMandatory` | bool | `true/false` | Requires every newly added server to pass all configured health checks before F5 NGINX Plus sends traffic to it. | No | `false` | +| `persistent` | bool | `true/false` | Determines whether previous state is remembered after reloading configuration. | No | `false` | +| `port` | int | In range `1-65535` | The port on the service that will provide the health check. | No | N/A | +| `interval` | int | Integer (Max 2147483647) | The length of time between each health check sent from Nginx to the respective service. | No | 5 | +| `unhealthyThreshold` | int | Integer (Max 2147483647) | Denotes the number of failed checks before the service is considered unhealthy. | No | 1 | +| `healthyThreshold` | int | Integer (Max 2147483647) | Denotes the number of successful checks before the service is considered healthy. | No | 1 | +| `http`
    `.uriPath` | string | Example: `/health` | The URI used for the health check and is appended to the server domain name or IP address | No | `/` | +| `http`
    `.responseMatch`
    `.statusCode`
    `.exact` | int | In range `100-599` | List of specific status codes to match against | No | N/A | +| `http`
    `.responseMatch`
    `.statusCode`
    `.range` | string | Example: `["200-399"]` | List of status code ranges to match against | No | N/A | +| `http`
    `.responseMatch`
    `.header`
    `.name` | string | Example: `header-name` | Any valid header name from the response | Yes | N/A | +| `http`
    `.responseMatch`
    `.header`
    `.value` | string | Example: `header-value` | Any valid header value from the response | Yes | N/A | +| `http`
    `.responseMatch`
    `.header`
    `.condition` | string | Regex: `^([=!~]\|!~)$` | The matching operator for the header. Uses NGINX Health Check `match` directive syntax | Yes | N/A | +| `http`
    `.responseMatch`
    `.body`
    `.requiredVariable` | string | Example: `jsonFieldKey` | Field in json of body to match against | No | N/A | +| `http`
    `.responseMatch`
    `.body`
    `.value` | string | Example: `jsonFieldValue` | Any valid body content to be matched against | Yes | N/A | +| `http`
    `.responseMatch`
    `.body`
    `.condition` | string | Regex: `^!?~$` | The matching operator for the body. Uses NGINX Health Check `match` directive syntax | Yes | N/A | +| `connectTimeout` | string | Example: `60s` | Sets a timeout for establishing a connection with a proxied server. Uses NGINX time measurement syntax | No | `1s` | +| `readTimeout` | string | Example: `60s` | Sets a timeout for reading a response from the proxied server. Uses NGINX time measurement syntax | No | `1s` | + +{{< /bootstrap-table >}} + + +### Create an HTTP Health Check Policy + +{{}} + +{{%tab name="API"%}} + +{{}}{{< include "acm/how-to/access-acm-api.md" >}}{{}} + +To create an HTTP health check policy, send an HTTP `POST` to the Proxies endpoint. + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Method | Endpoint | +|----------|---------------------------------------------------------| +| `POST` | `/services/workspaces/{SERVICE_WORKSPACE_NAME}/proxies` | + +{{
    }} + + +
    +JSON request + +``` json +{ + "policies": { + "backend-health-check": [ + { + "action": { + "targetBackendPolicyLabel": "default", + "transportProtocol": "http", + "isMandatory": true, + "persistent": true, + "port": 8080, + "interval": 5, + "unhealthyThreshold": 3, + "healthyThreshold": 2, + "http": { + "uriPath": "/health_check", + "responseMatch": { + "statusCode": { + "range": [ + "200-399" + ] + }, + "header": { + "name": "some-header", + "value": "ok", + "condition": "=" + }, + "body": { + "requiredVariable": "jsonField", + "value": "some-response-body", + "condition": "~" + } + } + }, + "connectTimeout": "10s", + "readTimeout": "10s" + } + } + ] + } +} +``` + +
    + +
    + +{{%/tab%}} + +{{%tab name="UI"%}} + +To create a Health Check policy using the web interface: + +1. {{< include "acm/webui-acm-login.md" >}} +1. On the left menu, select **Services**. +1. Select a workspace in the list that contains the API Proxy you want to update. +1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. +1. On the left-side *API Proxy* menu, select **Policies**. +1. On the *Advanced > Policies* page, on the **API Proxy** tab, locate the **Backend Health Check** policy. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Add Policy**. +1. Complete the necessary fields: + + - **Apply the policy to**: Specify the label that was assigned to the backend service if it's different from the default value `default`. + - **Transport Protocol**: Specify the transport protocol of the health check. Currently, only HTTP is supported. + - **Is Mandatory**: Toggle the switch to on if every new service target (server) must pass all configured health checks before NGINX Plus sends traffic to it. + - **Port**: If the health check needs to be conducted on a port other than the one specified for the backend service targets, specify the port to use. + - **Interval**: The length of time between each health check sent from NGINX Plus to the backend service targets. + - **Unhealthy Threshold**: Denotes the number of failed checks before the service is considered unhealthy. + - **Health Threshold**: Denotes the number of successful checks before the service is considered healthy. + - **URI Path**: The endpoint (URI) that NGINX Plus uses for the health check requests. + - **Status Code Exact**: The list of specific HTTP status codes to match against in the backend response. + - **Status Code Range**: The list of HTTP status code ranges to match against in the backend response. 
+ - **Header Name**: The name of the header to use in the backend response matching. + - **Header Condition**: The operator used when checking the header value. Refer to the [NGINX `match` directive documentation](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html?&_ga=2.33487351.893608448.1680639753-1533979881.1676506809#match) for specifics. + - **Header Value**: The header value to use in the backend response matching. + - **Body Required Variable**: The field in the JSON of the backend response body to match against. + - **Body Condition**: The operator used when checking the body value. Refer to the [NGINX `match` directive documentation](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html?&_ga=2.33487351.893608448.1680639753-1533979881.1676506809#match) for specifics. + - **Body Value**: The body value to use in the backend response matching. + - **Connection Timeout**: Sets a timeout for establishing a connection with a proxied server. Follows NGINX configuration file measurement units syntax. + - **Read Timeout**: Sets a timeout for reading a response from the proxied server. Follows NGINX configuration file measurement units syntax. + +1. Select **Add** to apply the Health Check policy to the API Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. + +{{%/tab%}} + +{{}} + +### Verify the Policy + +Confirm that the policy has been set up and configured correctly by taking these steps: + +- Check that your backend service targets (upstream servers) are receiving health check endpoint calls. +- When `isMandatory` is set to `true`, verify that your backend service targets are not receiving proxied traffic until they clear the health checks. +- When `persistent` is set to `true`, the state and behavior for `interval`, `unhealthyThreshold`, `healthyThreshold`, and timeout-related parameters should be preserved between subsequent deployments of API proxies and environments. 
+ +--- + +## gRPC Health Checks + +### Before You Begin + +To complete the steps in this guide, you need the following: + +- API Connectivity Manager is installed, licensed, and running +- You have [one or more Environments with a gRPC API Gateway]({{< relref "/nms/acm/getting-started/add-api-gateway.md" >}}) +- You have [published one or more gRPC API Gateways]({{< relref "/nms/acm/how-to/services/publish-grpc-proxy.md" >}}) +- Your backend service(s) implements the [gRPC health checking protocol](https://github.com/grpc/grpc/blob/master/doc/health-checking.md#grpc-health-checking-protocol) and/or returns a status code (normally `12` for `unimplemented`) for health check requests. + +### Policy Settings + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Datatype | Possible Values | Description | Required | Default value | +|------------------------|----------|----------------------------|-----------------------------------------------------------------------------------------------------------------------------|----------|---------------| +| `targetBackendLabel` | string | Example: `default` | This field is used to target a specific backend by label. | No | `default` | +| `mandatory` | bool | `true/false` | Requires every newly added server to pass all configured health checks before NGINX Plus sends traffic to it. | No | `false` | +| `persistent` | bool | `true/false` | Determines whether previous state is remembered after reloading configuration. | No | `false` | +| `port` | int | In range `1-65535` | The port on the service that will provide the health check. | No | N/A | +| `interval` | int | Integer (Max 2147483647) | The length of time between each health check sent from Nginx to the respective service. | No | N/A | +| `passes` | int | Integer (Max 2147483647) | Denotes the number of successful checks before the service is considered healthy. 
| No | N/A | +| `fails` | int | Integer (Max 2147483647) | Denotes the number of unsuccessful checks before the service is considered unhealthy. | No | N/A | +| `grpc`
    `.service` | string | Example: `RouteGuide` | Defines the target GRPC service to be used for this health check | No | N/A | +| `grpc`
    `.status` | int | Example: `12` | The expected GRPC status code return code from the upstream gRPC backend to conclude that the health check was successful | No | N/A | +| `connectTimeout` | string | Example: `60s` | Sets a timeout for establishing a connection with a proxied server. Uses NGINX time measurement syntax | No | `1s` | +| `readTimeout` | string | Example: `60s` | Sets a timeout for reading a response from the proxied server. Uses NGINX time measurement syntax | No | `1s` | + +{{< /bootstrap-table >}} + + +### Create a gRPC Health Check Policy + +{{}} + +{{%tab name="API"%}} + +{{}}{{< include "acm/how-to/access-acm-api.md" >}}{{}} + +To create a gRPC health check policy, send an HTTP `POST` to the Proxies endpoint. + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Method | Endpoint | +|----------|---------------------------------------------------------| +| `POST` | `/services/workspaces//proxies` | + +{{}} + + +
    +JSON request + +``` json +{ + "policies": { + "grpc-backend-health-check": [ + { + "action": { + "mandatory": true, + "persistent": true, + "port": 84, + "interval": 7, + "fails": 3, + "passes": 5, + "connectTimeout": "6s", + "readTimeout": "5s", + "grpc": { + "status": 12 + } + } + } + ] + } +} +``` + +
    + +
    + +{{%/tab%}} + +{{%tab name="UI"%}} + +To create a gRPC Health Check policy using the web interface: + +1. {{< include "acm/webui-acm-login.md" >}} +1. On the left menu, select **Services**. +1. Select a workspace in the list that contains the API Proxy you want to update. +1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. +1. On the left-side *API Proxy* menu, select **Policies**. +1. On the *Advanced > Policies* page, on the **API Proxy** tab, locate the **gRPC Backend Health Check** policy. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Add Policy**. +1. Complete the necessary fields: + + - **Apply the policy to**: Provide the label that was assigned to a Backend Service if it is different from the default value `default`. + - **Is Mandatory**: Toggle the switch to on if every new service target (server) must pass all configured health checks before NGINX Plus sends traffic to it. + - **Port**: If the health check needs to be conducted on a port other than the one specified for the backend service targets, specify the port to use. + - **Interval**: The length of time between each health check sent from NGINX Plus to the backend service targets. + - The **gRPC** setting will determine which health check service will be contacted by NGINX and which status code will be expected. + +1. Select **Add** to apply the Health Check policy to the API Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. + +{{%/tab%}} + +{{
    }} + +### Verify the Policy + +Confirm that the policy has been set up and configured correctly by taking these steps: + +- Check that your backend service targets (upstream servers) are receiving health check endpoint calls. You may also find the tools [grpcurl](https://github.com/fullstorydev/grpcurl) and [grpc-health-probe](https://github.com/grpc-ecosystem/grpc-health-probe) helpful for debugging. +- When `mandatory` is set to `true`, verify that your backend service targets are not receiving proxied traffic until they clear the health checks. +- When `persistent` is set to `true`, state and behavior for `interval`, `passes`, `fails`, and timeout related parameters should be preserved between subsequent deployments of API Proxies and Environments. diff --git a/content/nms/acm/how-to/policies/http-backend-configuration.md b/content/nms/acm/how-to/policies/http-backend-configuration.md new file mode 100644 index 000000000..983357e75 --- /dev/null +++ b/content/nms/acm/how-to/policies/http-backend-configuration.md @@ -0,0 +1,817 @@ +--- +description: Learn how to use the F5 NGINX Management Suite API Connectivity Manager + to manage HTTP API Gateways by applying a backend configuration policy. +docs: DOCS-1141 +doctypes: +- API Connectivity Manager +- api management +- concept +tags: +- docs +toc: true +weight: 650 +title: HTTP Backend Configuration +--- + +## Overview + +{{< include "acm/how-to/policies-proxy-intro" >}} + +--- + +## About the Backend Configuration Policy + +The backend configuration policy allows API Owners to manage their backend services with a common set of configuration options. These configuration options are applied to all service targets in a given backend service. 
+ +The backend configuration policy provides the ability to configure: + +- [Load balancing](#load-balancing) +- [Keep-Alive connections](#keep-alive-connections) +- [Connection settings](#connection-settings) +- [Queues](#queues) +- [Buffers](#buffers) +- [Session Cookies](#session-cookies) +- [NTLM Authentication](#ntlm-authentication) + +Later sections of this guide will cover each of these areas in turn. + +### Intended Audience + +{{< include "acm/how-to/policies/api-owner-persona.md">}} + +--- + +## Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- API Connectivity Manager is installed, licensed, and running. +- You have one or more Environments with an [API Gateway]({{< relref "/nms/acm/getting-started/add-api-gateway" >}}). +- You have published one or more [API Gateways]({{< relref "/nms/acm/getting-started/publish-api-proxy" >}}). + +--- + +## Workflow for Applying Policy + +To apply the policy or make changes to it, here's what you need to do: + +- [Edit an existing environment or create a new one]({{< relref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#add-environment" >}}). +- Check the advanced settings for the environment to see if the policy has been applied. +- Edit the policy to make changes for each environment. Save and publish the changes. + +--- + +## Target Backend Service + +It is possible to target specific backend services with a backend configuration policy through the use of labels. Backend services whose label matches the target backend policy label configured in the backend configuration policy will have that configuration applied. If no target backend policy label is provided, the backend configuration policy will be applied to all backend services whose label is set to `default`. + +### Configuring Target Backend Service + +Take the steps in this section to configure a backend configuration policy for specific backend service targets by label. 
In the example below, the backend configuration policy keepalive settings will be applied to all backend service targets with the `petstore-api` label. + +{{}} +{{%tab name="API"%}} + +Send a `POST` request to add a load balancer configuration to the API Proxy through the backend-config policy. + + +{{}} + +| Method | Endpoint | +|----------|---------------------------------------------------------| +| `POST` | `/services/workspaces//proxies` | + +{{}} + + +```json +{ + "policies": { + "backend-config": [ + { + "action": { + "targetBackendPolicyLabel" : "petstore-api", + "keepCacheConnectionAlive": 32, + "keepAliveRequests": 1000, + "keepAliveTime": "1h", + "keepAliveTimeout": "60s" + } + } + ] + } +} +``` + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values | Description | Required | Default value | +|----------------------------|---------|-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|---------------| +| `targetBackendPolicyLabel` | string | Example: `petstore-api` | Target backend labels for policy application. If not supplied this backend service configuration would be applied to the default backend service of the API proxy. | No | `default` | + +{{< /bootstrap-table >}} + + +{{%/tab%}} +{{%tab name="UI"%}} + +1. {{< include "acm/webui-acm-login.md">}} +1. On the left menu, select **Services**. +1. Select a workspace in the list that contains the API Proxy you want to update. +1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. +1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **HTTP Backend Config**. +1. 
To apply the backend configuration policy to backend service targets, set the **Target Backend Policy Label** as the label of the backend service targets. +1. Select **Add** to apply the backend configuration policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. + +{{%/tab%}} +{{}} + +--- + +## Load Balancing + +Six load balancing options are available; round robin (default), least connections, least response time, hashed key value, IP hash, or random. + +### Balancing Algorithms + +#### Round Robin + +This algorithm distributes requests to the application in a round-robin fashion to each backend service target in equal and circular order. As the default load balancing algorithm, it applies to all upstream server blocks containing backend service targets. + +#### Least Connections + +This algorithm distributes requests to the server with the least number of active connections. If there are several servers, they are tried sequentially using the round-robin balancing method. + +#### Least Time + +{{< note >}} This load balancing algorithm is available as part of the F5 NGINX Plus commercial subscription. {{}} + +This algorithm distributes requests to the server with the least average response time and least number of active connections. If there are several servers, they are tried sequentially using the round-robin balancing method. + +If the `HEADER` measurement is specified, the time to receive the response header is used. If the `LAST_BYTE` measurement is specified, the time to receive the full response is used. If the `LAST_BYTE_INFLIGHT` parameter is specified, incomplete requests are also considered. + +#### Hash + +This algorithm distributes requests with client-server mapping based on the hashed `key` value. The `key` can contain text, variables, and their combinations. Note that adding or removing a server from the group may result in remapping most of the keys to different servers. 
The method is compatible with the [Cache::Memcached](https://metacpan.org/pod/Cache::Memcached) Perl library. + +If the `consistent` parameter is specified, the [ketama](https://www.metabrew.com/article/libketama-consistent-hashing-algo-memcached-clients) consistent hashing method will be used instead. The method ensures that only a few keys will be remapped to different servers when a server is added to or removed from the group. This helps to achieve a higher cache hit ratio for caching servers. The method is compatible with the [Cache::Memcached::Fast](https://metacpan.org/pod/Cache::Memcached::Fast) Perl library with the `ketama_points` parameter set to 160. + +#### IP Hash + +This algorithm distributes requests between servers based on client IP addresses. The first three octets of a client's IPv4 address, or an entire IPv6 address are used as a hashing key, ensuring that requests from the same client will always be passed to the same server except when the server is unavailable. In the latter case, client requests will be passed to another server. Most probably, it will always be the same server as well. + +If one of the servers needs to be temporarily removed, it should be marked with the down parameter to preserve the current hashing of client IP addresses. + +### Configuring a Load Balancer + +Follow the steps in this section to configure request load balancing across backend service targets. + +{{}} +{{%tab name="API"%}} + +Send a `POST` request to add a load balancer configuration to the API Proxy through the backend-config policy. 
+ + +{{}} + +| Method | Endpoint | +|----------|---------------------------------------------------------| +| `POST` | `/services/workspaces//proxies` | + +{{}} + + +```json +{ + "policies": { + "backend-config": [ + { + "action": { + "loadBalancing": { + "algorithm": "ROUND_ROBIN", + "leastTimeMeasurement": "HEADER", + "hashKey": "$request_uri", + "consistentHashing": true, + "randomTwo": true, + "randomMethod": "LEAST_CONN" + } + } + } + ] + } +} +``` + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values | Description | Required | Default value | +|------------------------|---------|--------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|---------------| +| `algorithm` | string | One of:
    [`ROUND_ROBIN`, `LEAST_CONN`, `LEAST_TIME`, `HASH`, `IP_HASH`, `RANDOM`] | The load balancing algorithm to use. Default `ROUND_ROBIN` is used without any configuration. | No | `ROUND_ROBIN` | +| `leastTimeMeasurement` | string | One of:
    [`HEADER`, `LAST_BYTE`, `LAST_BYTE_INFLIGHT`] | Optional configuration option for `LEAST_TIME` algorithm. The measurement used to determine `LEAST_TIME`. | No | `HEADER` | +| `hashKey` | string | Text, variables, and their combinations. | Required configuration option for `HASH` algorithm. Example: `$request_uri` | Semi-optional | N/A | +| `consistentHashing` | boolean | `true/false` | Optional configuration option for `HASH` algorithm. Uses ketama consistent hashing method. | No | `true` | +| `randomTwo` | boolean | `true/false` | Optional configuration option for `RANDOM` algorithm. Instructs NGINX to randomly select two servers and then choose a server using the specified `randomMethod`. | No | `true` | +| `randomMethod` | string | One of:
    [`LEAST_CONN`, `LEAST_TIME`, `LAST_TIME_HEADER`, `LEAST_TIME_LAST_BYTE`] | Optional configuration option for `RANDOM` algorithm. Specifies which load balancing algorithm to use for a randomly selected server. | No | `LEAST_CONN` | + +{{< /bootstrap-table >}} + + +{{%/tab%}} +{{%tab name="UI"%}} + +1. {{< include "acm/webui-acm-login.md">}} +1. On the left menu, select **Services**. +1. Select a workspace in the list that contains the API Proxy you want to update. +1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. +1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **HTTP Backend Config**. +1. To enable a load balancer other than the default round-robin, enable the toggle for **Add an alternate load balancer**. +1. Select your **Load Balancing Algorithm** from the drop-down menu. + - For `LEAST_TIME` define the **Least Time Measurement** + - For `HASH` define the **Hash Key** and if **Consistent Hashing** is required. + - For RANDOM set if **Random Two** should be used and the **Random Method** load balancing algorithm. +1. Select **Add** to apply the backend configuration policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. + +{{%/tab%}} +{{
    }} + +--- + +## Keep-Alive Connections + +HTTP keepalive (persistent) connections [[RFC-2068]](https://www.rfc-editor.org/rfc/rfc2068.html#section-8) are a necessary performance feature that reduce latency and allow web pages to load faster. HTTP uses a mechanism called keepalive connections to hold open the TCP connection between the client and the server after an HTTP transaction has completed. If the client needs to conduct another HTTP transaction, it can use the idle keepalive connection rather than creating a new TCP connection. + +If lots of clients use HTTP keepalives and the web server has a concurrency limit or scalability problem, then performance plummets once that limit is reached. It does not take many clients to exhaust the concurrency limit in many contemporary web and application servers and any thread‑ or process‑based web or application server is vulnerable to concurrency limitations. + +NGINX uses a different architecture that does not suffer from the concurrency problems described above. It transforms slow client connections to optimized benchmark‑like connections to extract the best performance from your servers. This allows each NGINX process to easily scale to tens, thousands, or hundreds of thousands of connections simultaneously. + +### Configuring Keep-Alive Connections + +Follow the steps in this section to configure HTTP keepalives for your backend service targets. + +{{}} +{{%tab name="API"%}} + +Send a `POST` request to add a keepalive connection configuration to the API Proxy through the backend-config policy. 
+ + +{{}} + +| Method | Endpoint | +|----------|---------------------------------------------------------| +| `POST` | `/services/workspaces//proxies` | + +{{}} + + +```json +{ + "policies" : { + "backend-config" : [ + { + "action" : { + "keepCacheConnectionAlive": 32, + "keepAliveRequests": 1000, + "keepAliveTime": "1h", + "keepAliveTimeout": "60s" + } + } + ] + } +} +``` + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values | Description | Required | Default value | +|----------------------------|---------|-----------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------------| +| `keepCacheConnectionAlive` | integer | integer >= `1` | Activates the cache for connections to upstream servers. Sets the maximum number of idle keepalive connections to upstream servers that are preserved in the cache of each worker process. When this number is exceeded, the least recently used connections are closed. | No | `32` | +| `keepAliveRequests` | integer | integer >= `1` | Sets the maximum number of requests that can be served through one keepalive connection. | No | `1000` | +| `keepAliveTime` | string | Example: `1h` | Limits the maximum time during which requests can be processed through one keepalive connection. Follows NGINX configuration time measurement units syntax. | No | `1h` | +| `keepAliveTimeout` | string | Example: `60s` | Sets a timeout during which an idle keepalive connection to an upstream server will stay open. Follows NGINX configuration time measurement units syntax. | No | `60s` | + +{{< /bootstrap-table >}} + + +{{%/tab%}} +{{%tab name="UI"%}} + +1. {{< include "acm/webui-acm-login.md">}} +1. On the left menu, select **Services**. +1. 
Select a workspace in the list that contains the API Proxy you want to update. +1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. +1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **HTTP Backend Config**. +1. Go to the **Keep-Alive Connection Settings** section. +1. If non-default values are required, enter configuration values for: + - Keep-Alive Max Cache Connections Alive + - Keep-Alive Requests + - Keep-Alive Time + - Keep-Alive Timeout +1. Select **Add** to apply the backend configuration policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. + +{{%/tab%}} +{{}} + +--- + +## Connection Settings + +The connection settings can be configured for maximum client request body size, establishing a connection timeout, maximum time for reading a response from the proxied server, or maximum time transmitting a request to the proxied server. + +### Client Max Body Size + +Sets the maximum allowed size of the client request body. If the size of a request exceeds the configured value, the `413 (Request Entity Too Large)` error is returned to the client. + +### Connect Timeout + +Defines a timeout for establishing a connection with a proxied server. Please note that this timeout cannot usually exceed 75 seconds. + +### Read Timeout + +Defines a timeout for reading a response from the proxied server. The timeout is set only between two successive read operations, not for the transmission of the whole response. The connection is closed if the proxied server does not transmit anything within this time. + +### Send Timeout + +Sets a timeout for transmitting a request to the proxied server. The timeout is set only between two successive write operations, not for the transmission of the whole request. 
The connection is closed if the proxied server does not receive anything within this time. + +### Configuring Connection Settings + +This section explains how to configure connection settings for your backend service targets. + +{{}} +{{%tab name="API"%}} + +Send a `POST` request to add request settings configuration to the API Proxy through the backend-config policy. + + +{{}} + +| Method | Endpoint | +|----------|---------------------------------------------------------| +| `POST` | `/services/workspaces//proxies` | + +{{}} + + +```json +{ + "policies" : { + "backend-config" : [ + { + "action" : { + "clientMaxBodySize" : "2m", + "connectTimeout": "30s", + "readTimeout": "30s", + "sendTimeout": "30s" + } + } + ] + } +} +``` + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values | Description | Required | Default value | +|---------------------|---------|-----------------|--------------------------------------------------------------------------------------------------------------------------------|----------|---------------| +| `clientMaxBodySize` | string | Example: `2m` | Sets the maximum allowed size of the client request body. Follows NGINX configuration file measurement units syntax. | No | N/A | +| `connectTimeout` | string | Example: `30s` | Sets a timeout for establishing a connection with a proxied server. Follows NGINX configuration time measurement units syntax. | No | N/A | +| `readTimeout` | string | Example: `30s` | Sets a timeout for reading a response from the proxied server. Follows NGINX configuration time measurement units syntax. | No | N/A | +| `sendTimeout` | string | Example: `30s` | Sets a timeout for transmitting a request to the proxied server. Follows NGINX configuration time measurement units syntax. | No | N/A | + +{{< /bootstrap-table >}} + + +{{%/tab%}} +{{%tab name="UI"%}} + +1. {{< include "acm/webui-acm-login.md">}} +1. On the left menu, select **Services**. +1. 
Select a workspace in the list that contains the API Proxy you want to update. +1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. +1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **HTTP Backend Config**. +1. Go to the **Connection Settings** section. +1. If non-default values are required, enter configuration values for: + - Connect Timeout + - Read Timeout + - Send Timeout + - Client Max Body Size +1. Select **Add** to apply the backend configuration policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. + +{{%/tab%}} +{{}} + +--- + +## Queues + +If an upstream server cannot be selected immediately while processing a request, the request will be placed into the queue. The queue configuration specifies the maximum number of requests that can be in the queue simultaneously. If the queue is filled up, or the server to pass the request to cannot be selected within the time period specified in the timeout parameter, the `502 (Bad Gateway)` error will be returned to the client. + +### Configuring a Queue + +Follow the steps in this section to configure a queue for your backend service targets. + +{{}} +{{%tab name="API"%}} + +Send a `POST` request to add a queue configuration to the API Proxy through the backend-config policy. 
+ + +{{}} + +| Method | Endpoint | +|----------|---------------------------------------------------------| +| `POST` | `/services/workspaces//proxies` | + +{{}} + + +```json +{ + "policies" : { + "backend-config" : [ + { + "action" : { + "queue" : { + "maxNumberOfRequests": 10, + "timeOut": "60s" + } + } + } + ] + } +} +``` + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values | Description | Required | Default value | +|-----------------------|---------|-----------------|--------------------------------------------------------------------------------------------------------------------------------|----------|---------------| +| `maxNumberOfRequests` | integer | Example: `10` | Maximum number of requests that can be in the queue at the same time. If not set then no queue will be configured. | Yes | N/A | +| `timeOut` | string | Example: `60s` | Sets the maximum time a request can wait in the queue for an upstream server to become available. Follows NGINX configuration time measurement units syntax. | No | `60s` | + +{{< /bootstrap-table >}} + + +{{%/tab%}} +{{%tab name="UI"%}} + +1. {{< include "acm/webui-acm-login.md">}} +1. On the left menu, select **Services**. +1. Select a workspace in the list that contains the API Proxy you want to update. +1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. +1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **HTTP Backend Config**. +1. Go to the **Queue Settings** section. +1. To configure a queue, enable the toggle for **Add a queue**. + - Set the **Maximum number of requests** (required). + - Set the **Queue timeout** (default 60s). +1. Select **Add** to apply the backend configuration policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. 
+ +{{%/tab%}} +{{}} + +--- + +## Buffers + +{{}}See the [Module ngx_http_proxy_module](https://nginx.org/en/docs/http/ngx_http_proxy_module.html) topic for more information about the directives mentioned in this section.{{}} + +When buffering is enabled, NGINX receives a response from the proxied server as soon as possible, saving it into the buffers set by the `proxy_buffer_size` and `proxy_buffers` directives. + +- Depending on the operating system, the `proxy_buffer_size` directive is 4 KB or 8 KB. This directive sets the buffer size for reading the first part of the response received from the proxied server. This part usually contains a small response header. By default, the buffer size is equal to one memory page. + +- The `proxy_buffers` directive controls the size and the number of buffers allocated for a request. Increasing the number of buffers lets you buffer more information. + +If the complete response doesn't fit into memory, a part can be saved to a temporary file on the disk. The default max size of this temporary file is 1024 MB, and the default write size is 8 KB or 16 KB, depending on the operating system. + +When configuring proxy buffers, the total size of the `proxy_buffers` (number * size) must be greater than the size of the `proxy_busy_buffers_size` minus one buffer. The default `proxy_busy_buffers_size` is 8 KB or 16 KB, depending on the operating system. + +If you get the error `[emerg] \"proxy_busy_buffers_size\"` `must be less than the size of all` `\"proxy_buffers\" minus one buffer` in NGINX in the data plane, it is because the proxy buffer total number and size are configured incorrectly. + +### Examples + +
    +Example valid Proxy Buffers number and size + +```text +proxy busy buffers size : 16 KB +proxy buffer number : 8 +proxy buffer size : 4 KB +total buffer size : 32 KB + +busy_buffers_size < total buffer size - buffer +16 KB < 32 KB - 4 KB +16 KB < 28 KB +True: Valid proxy buffer number & size configuration +``` + +
    + +
    +Example invalid proxy buffers number and size + +```text +proxy busy buffers size : 16 KB +proxy buffer number : 2 +proxy buffer size : 2k +total buffer size : 8 KB + +busy_buffers < total buffer size - buffer +16 KB < 8 KB - 2k +16 KB < 6k +False: Invalid proxy buffer number & size configuration +``` + +
    + +### Tuning Proxy Buffers Number and Size + +When using proxy buffering, we recommend that the complete response from upstream can be held in memory to avoid reading or writing to disk, which is significantly slower. + +If the response from upstream arrives fast and the client is slower, NGINX preserves the response in buffers, allowing it to close the upstream connection quickly. + +If the allocated buffer size doesn't allow storing the complete response in memory, it will be stored on disk, which is slower. + +Fine-tuning the `proxy_buffers` number and size depends on the body response size of your application. + +To determine the size of the HTML/data returned by a resource, you can use the following command: + +```bash +curl -so /dev/null https://nginx.org/ -w '%{size_download}' +``` + +Set `proxy_buffers` in a way that it equals the total maximum size of response data. + +For example, if the uncompressed body size is 8955 bytes (72 KB), you must set 72 KB worth of buffer size, either 18 4-KB-sized buffers or 9 8-KB-sized buffers. + +### Configuring Buffers + +Follow the steps in this section to configure buffers for your backend service targets. + +{{}} +{{%tab name="API"%}} + +Send a `POST` request to add a buffer configuration to the API Proxy through the backend-config policy. 
+ + +{{}} + +| Method | Endpoint | +|----------|---------------------------------------------------------| +| `POST` | `/services/workspaces//proxies` | + +{{}} + + +```json +{ + "policies" : { + "backend-config" : [ + { + "action" : { + "buffer": { + "number": 42, + "size": "16KB" + } + } + } + ] + } +} +``` + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values | Description | Required | Default value | +|----------|---------|-----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------------| +| `number` | integer | integer >= `2` | Sets the number of buffers used for reading a response from the proxied server for a single connection. | Yes | N/A | +| `size` | string | size >= `1K` | Sets the size of the buffers used for reading a response from the proxied server for a single connection. Follows NGINX configuration file measurement units syntax. | Yes | N/A | + +{{< /bootstrap-table >}} + + +{{%/tab%}} +{{%tab name="UI"%}} + +1. {{< include "acm/webui-acm-login.md">}} +1. On the left menu, select **Services**. +1. Select a workspace in the list that contains the API Proxy you want to update. +1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. +1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **HTTP Backend Config**. +1. Go to the **Buffer Settings** section. +1. To configure a buffer, enable the toggle for **Add a buffer**. + - Set the **Number of buffers** (required). + - Set the **Buffer size** (required). +1. Select **Add** to apply the backend configuration policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. 
+ +{{%/tab%}} +{{}} + +--- + +## Session Cookies + +Enables session affinity, which causes requests from the same client to be passed to the same server in a group of servers. With the cookie method used, information about the designated server is passed in an HTTP cookie generated by NGINX. + +A request from a client not yet bound to a particular server is passed to the server selected by the configured balancing method. Further requests with this cookie will be passed to the designated server. If the designated server cannot process a request, the new server is selected as if the client has not been bound yet. + +As a load balancing method always tries to evenly distribute the load considering already bound requests, the server with a higher number of active bound requests has less possibility of getting new unbound requests. + +### Configuring Session Cookies + +Folow the steps in this section to configure session cookies for your backend service targets. + +{{}} +{{%tab name="API"%}} + +Send a `POST` request to add a session cookie configuration to the API Proxy through the backend-config policy. If any configuration parameters are omitted, the corresponding fields are not set. 
+ + +{{}} + +| Method | Endpoint | +|----------|---------------------------------------------------------| +| `POST` | `/services/workspaces//proxies` | + +{{}} + + +```json +{ + "policies" : { + "backend-config" : [ + { + "action" : { + "sessionCookie" : { + "name" : "auth_cookie", + "path" : "/path/to/set", + "expiresIn" : "1h", + "domainName" : ".example.com", + "httpOnly" : true, + "secure" : true, + "sameSite" : "STRICT" + } + } + } + ] + } +} +``` + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values | Description | Required | Default value | +|--------------|---------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------------| +| `name` | string | Example: `auth_cookie` | Sets the name of the cookie to be set or inspected. | Yes | N/A | +| `path` | string | Example: `/path/to/set` | Defines the path for which the cookie is set. | No | N/A | +| `expiresIn` | string | Example: `1h` | Sets cookie expiry. If the parameter is not specified, it will cause the cookie to expire at the end of a browser session. Follows NGINX configuration time measurement units syntax. | No | N/A | +| `domainName` | string | Example: `.example.com` | Defines the domain for which the cookie is set. Parameter value can contain variables. | No | N/A | +| `httpOnly` | boolean | `true/false` | Adds the `HttpOnly` attribute to the cookie. | No | N/A | +| `secure` | boolean | `true/false` | Adds the `Secure` attribute to the cookie. | No | N/A | +| `sameSite` | string | One of:
    [`STRICT`, `LAX`, `NONE`] | Adds the `SameSite` attribute to the cookie. | No | N/A | + +{{< /bootstrap-table >}} + + +{{%/tab%}} +{{%tab name="UI"%}} + +1. {{< include "acm/webui-acm-login.md">}} +1. On the left menu, select **Services**. +1. Select a workspace in the list that contains the API Proxy you want to update. +1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. +1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **HTTP Backend Config**. +1. Go to the **Session Affinity/Cookies Settings** section. +1. To configure session cookies, enable the toggle for **Session Affinity/Cookies Settings**. + 1. Set the **Name** of the cookie (required). + 1. Set the **Path** (optional). + 1. Set the cookie **Expires in** (optional). If the parameter is not specified, it will cause the cookie to expire at the end of a browser session. + 1. Set the **Domain Name** (optional). + 1. Enable the **HTTP Only** toggle to add the HttpOnly attribute to the cookie (optional). + 1. Enable the **Secure** toggle to add the Secure attribute to the cookie (optional). + 1. Set the **Same Site** attribute value (optional). +1. Select **Add** to apply the backend configuration policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. + +{{%/tab%}} +{{
    }} + +--- + +## NTLM Authentication + +Allows proxying requests with [NTLM Authentication](https://en.wikipedia.org/wiki/Integrated_Windows_Authentication). The upstream connection is bound to the client connection once the client sends a request with the `Authorization` header field value starting with `Negotiate` or `NTLM`. Further client requests will be proxied through the same upstream connection, keeping the authentication context. When enabled, the HTTP Protocol version is set to 1.1. + +### Configuring NTLM Authentication + +Follow the steps in this section to configure NTLM authentication for your backend service targets. + +{{}} +{{%tab name="API"%}} + +Send a `POST` request to enable NTLM authentication for the API Proxy through the backend-config policy. + + +{{}} + +| Method | Endpoint | +|----------|---------------------------------------------------------| +| `POST` | `/services/workspaces//proxies` | + +{{}} + + +```json +{ + "policies" : { + "backend-config" : [ + { + "action" : { + "enableNTLMAuthn": false + } + } + ] + } +} +``` + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values | Description | Required | Default value | +|-------------------|---------|-----------------|-----------------------------------------------------|----------|---------------| +| `enableNTLMAuthn` | boolean | `true/false` | Enables proxying requests with NTLM Authentication. | No | `false` | + +{{< /bootstrap-table >}} + + +{{%/tab%}} +{{%tab name="UI"%}} + +1. {{< include "acm/webui-acm-login.md">}} +1. On the left menu, select **Services**. +1. Select a workspace in the list that contains the API Proxy you want to update. +1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. +1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **HTTP Backend Config**. 
+1. Go to the **Connection Settings** section. +1. To enable NTLM, enable the toggle for **Enable NTLM Authn**. +1. Select **Add** to apply the backend configuration policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. + +{{%/tab%}} +{{}} + +--- + +## Reference Backend Configuration Policy API Request Body + +```json +{ + "policies": { + "backend-config": [ + { + "action": { + "targetBackendPolicyLabel" : "default", + "keepCacheConnectionAlive": 32, + "keepAliveRequests": 1000, + "keepAliveTime": "1h", + "keepAliveTimeout": "60s", + "connectTimeout": "30s", + "readTimeout": "30s", + "sendTimeout": "30s", + "clientMaxBodySize": "2m", + "enableNTLMAuthn": false, + "loadBalancing": { + "algorithm": "LEAST_CONN", + "leastTimeMeasurement": "HEADER", + "hashKey": "$request_uri", + "consistentHashing": true, + "randomTwo": true, + "randomMethod": "LEAST_CONN" + }, + "queue": { + "maxNumberOfRequests": 10, + "timeOut": "60s" + }, + "buffer": { + "number": 8, + "size": "8k" + }, + "sessionCookie": { + "name": "auth_cookie", + "path": "/", + "expiresIn": "1h", + "domainName": ".example.com", + "httpOnly": true, + "secure": true, + "sameSite": "strict" + } + } + } + ] + } +} +``` diff --git a/content/nms/acm/how-to/policies/introspection.md b/content/nms/acm/how-to/policies/introspection.md new file mode 100644 index 000000000..fdedbe5f2 --- /dev/null +++ b/content/nms/acm/how-to/policies/introspection.md @@ -0,0 +1,427 @@ +--- +description: API Owners can restrict access to their APIs with OAuth2 tokens. The + policy is configured to grant access to APIs after having tokens introspected. +docs: DOCS-953 +doctypes: +- API Connectivity Manager +- api management +- concept +tags: +- docs +toc: true +weight: 800 +Title: Introspection +--- + +## Overview + +API Connectivity Manager API Owners can restrict access to their APIs with OAuth2 tokens by swapping an opaque token for claims or JWT token to be proxied to the backend service. 
The policy can be configured to grant access to APIs after having the tokens introspected. In addition, the claims in the token can be extracted and forwarded to the backend service. + +--- + +## What is OAuth2? + +{{< include "acm/tutorials/what-is-OAuth2.md" >}} + +### OAuth2 Roles + +The idea of roles is part of the core specification of the OAuth2 Authorization Framework. These define the essential components of an +OAuth2 system: + +- **Resource Owner**: An entity capable of granting access to a protected resource. It could be a system or an end-user. +- **Client**: An application making protected resource requests on behalf of the Resource Owner and with its authorization. +- **Authorization Server**: The server that issues access tokens to the client after successfully authenticating the resource owner and + obtaining authorization. The authorization server exposes two endpoints: the Token endpoint, which is involved in a machine-to-machine interaction for issuing access tokens, and the Introspection endpoint, which is used by the Resource Server to validate client access tokens. +- **Resource Server**: The server protecting the user resources capable of accepting and responding to protected resource requests using + access tokens. In this guide, NGINX running within the API Connectivity Manager API-Proxy is the Resource Server. + +### Token Introspection + +The standard method for validating access tokens with an IdP is called _Token Introspection_. _OAuth2 Token Introspection_ +[[RFC 7662]](https://www.rfc-editor.org/rfc/rfc7662) is now a widely supported standard that describes a JSON/REST interface that a Resource Server uses to present a token to the IdP, and describes the structure of the response. It is supported by many of the leading IdP vendors and cloud providers. + +NGINX can be used to validate access tokens on behalf of backend services. 
This has several benefits: + +- Requests reach the backend services only when the client has presented a valid token +- Existing backend services can be protected with access tokens without requiring code changes +- Only the NGINX instance (not every app) needs to be registered with the IdP +- Behavior is consistent for every error condition, including missing or invalid tokens + +The _OAuth2 Token Introspection_ flow includes the following steps: + +{{OAuth2 Token Introspection Flow.}} + +--- + +## Set up OAuth2 Introspection Policy + +You can set up an OAuth2 Introspection policy by using either the web interface or the REST API. + +### Edit the API-Proxy Settings + +{{}} + {{%tab name="Web Interface"%}} + +1. In the API Connectivity Manager user interface, select **Services > API Proxies**, then click the **...** icon in the **Actions** column for the API proxy that you want to enable the OAuth2 Introspection policy for, and select **Edit Proxy**. +2. Under the **Advanced** section, select **Policies**. +3. Under the **API Proxy** tab, locate the **OAuth2 Introspection** policy, click the **...** icon, and select **Add Policy**. +4. Update **Client Request** settings. + +{{}} + +| Configuration Setting | Description | +|----------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Specifies the token's location in incoming user request | Specifies where the access token is supplied in the incoming user request and the key from which the access token can be extracted. The default behavior is as a Bearer token in the Authorization request header. | + +{{}} + +5. Update **Introspection Request** settings. 
+ +{{}} + +| Configuration Setting | Description | +|-------------------------------------------------------- |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Enter the introspection endpoint | The IdP OAuth2 Token Introspection endpoint [[RFC 7662]](https://www.rfc-editor.org/rfc/rfc7662) where NGINX IdP client will send client `access_token`. | +| Enable SNI | Enables or disables passing of the server name through TLS Server Name Indication extension (SNI), [[RFC 6066]](https://www.rfc-editor.org/rfc/rfc6066) when establishing a connection with the proxied HTTPS server. | +| Override the default server name | Allows overriding the server name used to verify the certificate of the proxied HTTPS server and to be passed through SNI when establishing a connection with the proxied HTTPS server. By default, the host part of the `proxy_pass` URL is used. | + +{{}} + +6. Update **Credentials**. + +{{}} + +| Configuration Setting | Description | +|----------------------------- |--------------------------------------------------------------------------| +| Enter Client Application ID | Identifies the IdP Client making the token introspection request. | +| Enter Client Secret/Password | The IdP Client secret/password. | + +{{}} + +7. Update **Introspection Response** settings. 
+ +{{}} + +| Configuration Setting | Description | +|-------------------------------------------------------- |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Specify the introspection response type | Whether the token introspection endpoint should respond with a JSON object or JSON Web Token (JWT). The default is application/json. | +| Specify the list of claims to forward as headers to the backend | Forward claims from the token introspection response in the proxy header to the backend service.

    Can only be applied if the introspection response is configured to application/json. | +| Enable JWT token forwarding to backend | Forward introspection token response to backend service.

    Can only be applied if the introspection response is configured to application/jwt. | +| Specify how long introspected tokens will be cached | Specifies how long the introspected tokens will be cached. Tokens will be refreshed from the URI endpoint after the duration. Set as **0** to disable.

    Follows [NGINX time measurement syntax](http://nginx.org/en/docs/syntax.html). | +| Specify OAuth2 Token Type Hint | A hint about the token type submitted for introspection. The protected resource can pass this parameter to help the authorization server optimize the token lookup. Values for this field are defined in [[RFC6749]](https://www.rfc-editor.org/rfc/rfc6749). | + +{{
    }} + +8. Enable Introspection Token **Claim Verification**. To add a claim to verify click **+ Add a claim**, to add more than one claim to verify click the same symbol. To delete a claim click the **trash can** symbol for that claim. + +{{}} + +| Configuration Setting | Description | +|------ |---------------------------------------------------------------| +| Claim | The claim name. If the claim is nested, layers of depth are indicated with periods, example: `resource_access.account.roles`. | +| Type | The claim data type. | +| Delimiter | The claim value delimiter if value is a delimited string. | +| Value | The claim value to verify. | + +{{}} + +9. Enable **Resolver** if an external DNS is required. + +{{}} + +| Configuration Setting | Description | +|---------------------- |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Time Out | Sets a timeout for name resolution.
 
    Follows [NGINX time measurement syntax](http://nginx.org/en/docs/syntax.html). | +| Valid For | By default, NGINX caches answers using the TTL value of a response. The valid parameter allows overriding it.
 
    Follows [NGINX time measurement syntax](http://nginx.org/en/docs/syntax.html). | +| Hostname | The DNS Hostname or IP Address. Multiple DNS Resolvers can be added for a given OAuth2 Introspection Policy. | +| Listened Port | The DNS Port number. |
    }} + +10. Update **Error Handling**. + +{{}} + +| Configuration Setting | Description | +|---------------------------------------------------- |-------------------------------------------------------------------------------------------------------------------------------------- | +| Specify authorization failed error code | The error code that needs to be used by the NGINX data plane when the backend service cannot find a token match or access is forbidden. | +| Specify authorization token not provided error code | The error code that needs to be used by the NGINX data plane when a token is not supplied to the backend service. | + +{{}} + +11. Select **Add**. +12. Select **Save and Submit**. + + {{%/tab%}} + {{%tab name="REST API"%}} + +Send a POST request to add the OAuth2 Introspection policy to the API-Proxy. + + +{{}} + +| Method | Endpoint | +|--------|---------------------------------------------------------| +| POST | `/services/workspaces//proxies` | + +{{}} + + +{{< note >}} While all request body configuration values are presented in the request body structure example below, not all configuration + values are compatible. Please see the configuration value description table for further information. {{< /note >}} + +
    +JSON request + +```json +{ + "name": "{{proxyName}}", + "version": "v1", + "proxyConfig": { + "hostname": "{{environmentHostname}}", + "ingress": { + "basePath": "/api" + }, + "backends": [ + { + "serviceName": "backend-svc", + "serviceTargets": [ + { + "hostname": "10.0.0.10" + } + ] + } + ], + "policies": { + "oauth2-introspection": [ + { + "action": { + "introspectionEndpoint": "https://example.idp.com:8443/oauth/v2/oauth-introspect", + "enableSNI": true, + "proxyTLSName": "test.oauth.com", + "introspectionResponse": "application/json", + "cacheIntrospectionResponse": "5m", + "clientTokenSuppliedIn": "HEADER", + "clientTokenName": "Authorization", + "authzServerTokenHint": "ACCESS_TOKEN", + "forwardToken": false, + "forwardedClaimsInProxyHeader": [ + "username", + "exp", + "scope" + ], + "verifyClaims": [ + { + "claim": "sub", + "type": "STRING", + "value": "a95117bf-1a2e-4d46-9c44-5fdee8dddd11" + }, + { + "claim": "scope", + "type": "STRING", + "value": "read write email", + "delimiter": "SPACE" + }, + { + "claim": "aud", + "type": "ARRAY", + "value": ["https://protected.example.net/resource"] + }, + { + "claim": "resource_access.account.groups", + "type": "STRING", + "value": "default-group" + }, + { + "claim": "resource_access.account.roles", + "type": "ARRAY", + "value": [ + "default-roles", + "offline_access" + ] + }, + { + "claim": "email_verified", + "type": "BOOLEAN", + "value": true + }, + { + "claim": "user-group", + "type": "INTEGER", + "value": 42 + } + ], + "resolver": { + "valid": "30s", + "timeout": "30s", + "servers": [ + { + "hostname": "example.com" + }, + { + "hostname": "10.0.0.11", + "port": 53 + } + ] + }, + "errorReturnConditions": { + "noMatch": { + "returnCode": 403 + }, + "notSupplied": { + "returnCode": 401 + } + } + }, + "data": [ + { + "clientAppID": "idp-client-app-id", + "clientSecret": "dbdaa3e1-f100-420x-bfd0-875bd8a77cd7" + } + ] + } + ] + } + } +} +``` + +
    + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Datatype | Possible Values | Description | Required | Default value | +|--------------------------------------- |------------------ |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |---------- |-------------------------------------------- | +| `introspectionEndpoint` | string | Example:
    `"https://idp.com/introspect"` | The IdP OAuth2 Token Introspection endpoint [[RFC 7662]](https://www.rfc-editor.org/rfc/rfc7662) where NGINX IdP client will send client `access_token`. | True | N/A | +| `enableSNI` | boolean | `true`/`false` | Enables or disables passing of the server name through TLS Server Name Indication extension (SNI), [[RFC 6066]](https://www.rfc-editor.org/rfc/rfc6066) when establishing a connection with the proxied HTTPS server. | False | `false` | +| `proxyTLSName` | string | Example: `test.oauth.com` | Allows overriding the server name used to verify the certificate of the proxied HTTPS server and to be passed through SNI when establishing a connection with the proxied HTTPS server. By default, the host part of the `proxy_pass` URL is used. | False | Host part of `introspectionRequest` | +| `introspectionResponse` | string | One of:
    [`"application/json"`,
    `"application/jwt"`] | Whether the token introspection endpoint should respond with a JSON object or JSON Web Token (JWT). | False | `"application/json"` | +| `cacheIntrospectionResponse` | string | Example: `"5m"` | Specifies how long the introspected tokens will be cached. Tokens will be refreshed from the URI endpoint after the duration. Set as `0s-m-h` to disable.

    Follows [NGINX time measurement syntax](http://nginx.org/en/docs/syntax.html). | False | `"5m"` | +| `clientTokenSuppliedIn` | string | One of:
    [`"HEADER"`, `"QUERY"`] | Specifies where the access token is supplied in the incoming user request. | False | `"HEADER"` | +| `clientTokenName` | string | Example:
    `"Authorization"` | Specifies the key under which the access token can be extracted from in the incoming user request.

    Note: To maintain Bearer token behavior, `clientTokenSuppliedIn` must be set to `HEADER`, and `clientTokenName` must be set to `Authorization`. This is the default behavior of the Introspection Policy. | False | `"Authorization"` | +| `authzServerTokenHint` | string | One of:
    [`"ACCESS_TOKEN"`,
    `"REFRESH_TOKEN"`] | A hint about the type of the token submitted for introspection. The protected resource can pass this parameter to help the authorization server optimize the token lookup. Values for this field are defined in [[RFC6749]](https://www.rfc-editor.org/rfc/rfc6749). | False | N/A | +| `forwardToken` | boolean | `true`/`false` | Forward introspection token response to backend service.

    Can only be applied if the `introspectionResponse` is set to `application/jwt`. | False | `true` | +| `forwardedClaimsInProxyHeader` | array of strings | Standard claims can be found in
    _OAuth2 Token Introspection_
    [[RFC 7662]](https://www.rfc-editor.org/rfc/rfc7662).

    This is not an exhaustive list,
    IdPs and Resource Owners can
    configure their own claims. | Forward claims from the token introspection response in the proxy header to the backend service.

    Can only be applied if the `introspectionResponse` is set to `application/json`. | False | `["scope",`
    `"username",`
    `"exp"]` | +| `verifyClaims[].claim` | string | Example:
    `"resource_access.account.roles"` | The claim name. If the claim is nested, layers of depth are indicated with periods. | True | N/A | +| `verifyClaims[].type` | string | One of:
    [`"STRING"`, `"ARRAY"`, `"BOOLEAN"`, `"INTEGER"`] | The claim data type. | True | N/A | +| `verifyClaims[].delimiter` | string | One of:
    [`"SPACE"`, `"COMMA"`, `"PERIOD"`, `"PLUS"`, `"COLON"`, `"SEMI-COLON"`, `"VERTICAL-BAR"`, `"FORWARD-SLASH"`, `"BACK-SLASH"`, `"HYPHEN"`, `"UNDERSCORE"`] | The claim value delimiter if value is a delimited string. | Semi-Optional | N/A | +| `verifyClaims[].value` | - | Examples:
    `"test-user-1"`
    `"read write email"`
    `["default-roles","offline_access"]`
    `42`
    `true` | The claim value to verify. | True | N/A | +| `resolver.valid` | string | Example: `"30s"` | By default, NGINX caches answers using the TTL value of a response. The `valid` parameter allows overriding it.

    Follows [NGINX time measurement syntax](http://nginx.org/en/docs/syntax.html). | False | `"30s"` | +| `resolver.timeout` | string | Example: `"30s"` | Sets a timeout for name resolution.

    Follows [NGINX time measurement syntax](http://nginx.org/en/docs/syntax.html). | False | `"30s"` | +| `resolver.servers[].hostname` | string | Valid hostname or IP Address | The DNS Hostname. | True | N/A | +| `resolver.servers[].port` | int32 | Valid 32-bit integer | The DNS Port number. | False | N/A | +| `errorReturnConditions.noMatch` | integer | In range: `400` - `599` | The error code that needs to be used by the NGINX data plane when the backend service cannot find a token match or access is forbidden. | False | `403` | +| `errorReturnConditions.notSupplied` | integer | In range: `400` - `599` | The error code that needs to be used by the NGINX data plane when a token is not supplied to the backend service. | False | `401` | +| `data.clientAppID` | string | Example:
    `"nginx-docs-client"` | Identifies the IdP Client making the token introspection request. | True | N/A | +| `data.clientSecret` | string | Example:
    `"db3e1-f100-420x-bfd0"` | The IdP Client secret/password. | True | N/A | + +{{< /bootstrap-table >}} + + +{{%/tab%}} +{{
    }} + +--- + +## Enabling Server Name Indication + +A generic solution for running several HTTPS servers on a single IP address is the [TLS Server Name Indication (SNI)](https://en.wikipedia.org/wiki/Server_Name_Indication) extension [[RFC 6066]](https://www.rfc-editor.org/rfc/rfc6066), which allows a client to pass a requested server name during the SSL +handshake. This solution lets the server know which certificate it should use for the client connection. + +Some Authorization Servers require SNI to be enabled during the OAuth Introspection request SSL handshake. When an Authorization server needs SNI, the following error messages will appear in the NGINX error logs on the data plane proxy host. + +```log +2022/12/04 15:24:43 [warn] 9501#9501: *73 upstream server temporarily disabled while SSL +handshaking to upstream, client: 10.0.0.1, server: api.io, +request: "GET /api HTTP/1.1", subrequest: "/_oauth2_send_introspection_request_0a2f6842_default", +upstream: "https://test.oauth.com:443/test/oauth2/introspect", host: "api.io" + +2022/12/04 15:24:43 [error] 9501#9501: *73 js: OAuth unexpected response from introspection server +(HTTP 502): {"message":"Bad Gateway","status":"502"} + +2022/12/04 15:25:27 [error] 9500#9500: *79 SSL_do_handshake() failed (SSL: error:14094410:SSL +routines:ssl3_read_bytes:sslv3 alert handshake failure:SSL alert number 40) while SSL handshaking +to upstream, client: 10.0.0.1, server: api.io, request: "GET /api HTTP/1.1", +subrequest: "/_oauth2_send_introspection_request_0a2f6842_default", +upstream: "https://test.oauth.com:443/test/oauth2/introspect", host: "api.io" +``` + +To enable sending the SNI with the OAuth Introspection request, set the `oauth-introspection` policy `action.enableSNI` value to `true`. By default, the host part of the `action.introspectionRequest` value is used. 
To override the default behavior and send a different server name through SNI, set `action.proxyTLSName` as the server name required to verify the certificate of the Authorization Server. + +
    +JSON request + +```json +{ + "policies": { + "oauth2-introspection": [ + { + "action": { + "introspectionEndpoint": "https://example.idp.com:8443/oauth/v2/oauth-introspect", + "enableSNI": true, + "proxyTLSName": "test.oauth.com" + } + } + ] + } +} +``` + +
    + +If the override value provided in `action.proxyTLSName` is incorrect, the Authorization Server should respond with a `4xx` client error code. The following error log is an example of an incorrect override `action.proxyTLSName` found in the NGINX error logs on the data plane proxy host. + +```log +2022/12/04 15:27:12 [error] 7477#7477: *50 js: OAuth unexpected response from +introspection server (HTTP 403): Forbidden +``` + +In this example, the end user also gets a `403 Forbidden` response from the data plane proxy. If `action.cacheIntrospectionResponse` is enabled and `action.proxyTLSName` is changed from a correct value to an incorrect value, the cached access token is valid until it expires. When the cached access token expires, end users will see their requests to the data plane proxy return with `403 Forbidden` responses. + +The NGINX OAuth2 Introspection configuration used by API Connectivity Manager does not cache tokens if the introspection request status code is anything other than `200 Success`. Any introspection requests with user access tokens returning `4xx` or `5xx` response codes will work once the policy introspection configuration is corrected and the Authorization Server responds with status code `200`. + +## Policy Interoperability Considerations + +It is only possible to configure one OAuth2 Introspection Policy per Proxy in API Connectivity Manager. Only one set of `clientAppId` credentials can be +configured per OAuth2 Introspection Policy. + +While an OAuth2 Introspection policy is configured for a Proxy in API Connectivity Manager it is not possible to configure any of the following policies on +the same Proxy: + +1. API Key Authentication +2. Basic Authentication +3. JWT Assertion + +Similarly, if any of the above three policies are configured for a Proxy in API Connectivity Manager, it is not possible to additionally configure an OAuth +2.0 Introspection Policy. 
+ +## Security Considerations + +### Token Caching + +Consumers of the introspection endpoint may wish to cache the response of the endpoint for performance reasons. As such, it is important +to consider the performance and security trade-offs when deciding to cache the values. For example, shorter cache expiration times will +result in higher security since the resource servers will have to query the introspection endpoint more frequently, but will result in an +increased load on the endpoint. Longer expiration times leave a window open where a token may actually be expired or revoked, but still be +able to be used at a resource server for the remaining duration of the cache time. + +One way to mitigate this problem is for consumers to never cache the value beyond the expiration time of the token, which would have been +returned in the `“exp”` parameter of the introspection response. + +### JWT Introspection Responses + +The introspection response type `application/jwt`, configured through `action.introspectionResponse`, has not had its security protocol +specification finalised at the time of development and writing it remains in **DRAFT** state. The draft specification _JWT Response for_ +_OAuth Token Introspection_ can be found [here](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-jwt-introspection-response). + +While in this state the specification is likely to change at any time, and how we implement it in API Connectivity Manager may change to meet the requirements of +the specification. We recommend that the default OAuth2 Introspection response type `application/json` is used for all production +scenarios. 
+ +## Related Links + +- [RFC-6749: The OAuth2 Authorization Framework](https://www.rfc-editor.org/rfc/rfc6749) +- [RFC 7662: OAuth2 Token Introspection](https://www.rfc-editor.org/rfc/rfc7662) +- [IETF-Draft: JWT Response for OAuth Token Introspection](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-jwt-introspection-response) +- [NGINX time measurement syntax](http://nginx.org/en/docs/syntax.html) diff --git a/content/nms/acm/how-to/policies/jwt-assertion.md b/content/nms/acm/how-to/policies/jwt-assertion.md new file mode 100644 index 000000000..53885918f --- /dev/null +++ b/content/nms/acm/how-to/policies/jwt-assertion.md @@ -0,0 +1,195 @@ +--- +description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to secure + API Gateways by applying an OAuth2 JSON Web Token (JWT) Assertion policy. +docs: DOCS-1119 +doctypes: +- API Connectivity Manager +- api management +- concept +tags: +- docs +toc: true +weight: 900 +title: JWT Assertion +--- + +## Overview + +{{< include "acm/how-to/policies-proxy-intro" >}} + +--- + +## OAuth2 JWT Assertion + +Authentication & authorization policies allow a user to restrict access to their APIs by determining the caller's identity and access level. There are several API Gateway authentication/authorization policy types supported by API Connectivity Manager: API key authentication, basic authentication, OAuth2 JWT assertion, and OAuth2 token introspection. This guide focuses specifically on OAuth2 JWT Assertion. + +[JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519) (JWTs, pronounced “jots”) are a compact and highly portable means of exchanging identity information. JWTs can be used for client authorization and are a better way to control access to web‑based APIs than traditional API keys. Using JWTs as API keys provides a high‑performance alternative to traditional API keys, combining best‑practice authorization technology with a standards‑based schema for exchanging identity attributes. 
+ +API Connectivity Manager API owners can restrict access to their APIs with JWTs. The API Proxy Policy can be configured to grant access to APIs only after validating a client's JWT. + +{{OAuth2 JWT Assertion Flow.}} + +--- + +## Anatomy of a JWT + +JWTs have three parts: a header, a payload, and a signature. In transmission, they look like the following (line breaks have been added for readability, the actual JWT is a single string): + +```jwt +eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9. +eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ. +SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c +``` + +A period (`.`) separates the header, payload, and signature. The header and payload are [Base64‑encoded](https://www.rfc-editor.org/rfc/rfc4648#section-5) JSON objects. The signature is encrypted using the algorithm specified by the alg header, which we can see when we decode our sample JWT: + + +{{}} + +| | Encoded | Decoded | +|---------|---------|---------| +| Header | `eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9` | `{`
        `"alg": "HS256",`
        `"typ": "JWT"`
    `}` | +| Payload | `eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6`
    `IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ`= | `{`
        `"sub": "1234567890",`
        `"name": "John Doe",`
        `"iat": 1516239022`
    `}` | + +{{
    }} + + +The [JWT standard](https://www.rfc-editor.org/rfc/rfc7519) defines several signature algorithms. The value HS256 in the example refers to HMAC SHA‑256. F5 NGINX Plus supports the HSxxx, RSxxx, and ESxxx [signature algorithms](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html) that are defined in the [standard](https://www.rfc-editor.org/rfc/rfc7518#section-3.1). The ability to cryptographically sign JWTs makes them ideal to be used for client authorization. + +--- + +## How NGINX Plus Validates a JWT + +A JWT is considered to be valid when the following conditions are met: + +1. The signature can be verified with a local or remote [JSON Web Key](https://datatracker.ietf.org/doc/html/rfc7517) (matching on the `kid` (“key ID”), if present, and `alg` (“algorithm”) header fields). +2. The JWT is presented inside the validity period when defined by one or both of the `nbf` (“not before”) and `exp` (“expires”) claims. + +--- + +## Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- API Connectivity Manager is installed, licensed, and running. +- You have one or more Environments with an [API Gateway]({{< relref "/nms/acm/getting-started/add-api-gateway" >}}). +- You have published one or more [API Gateways]({{< relref "/nms/acm/getting-started/publish-api-proxy" >}}) + +### How to Access the User Interface + +{{< include "acm/how-to/access-acm-ui" >}} + +### How to Access the REST API + +{{< include "acm/how-to/access-acm-api" >}} + +--- + +## Create an OAuth2 JWT Assertion Policy + +Take the steps in this section if you would like to restrict access to APIs to clients with a valid JWT. You can set up an OAuth2 JWT Assertion policy using either the web interface or the REST API. + +{{}} +{{%tab name="API"%}} + +Send a `POST` request to add the OAuth2 JWT Assertion policy to the API Proxy. 
+ + +{{}} + +| Method | Endpoint | +|----------|---------------------------------------------------------| +| `POST` | `/services/workspaces//proxies` | + +{{}} + + +{{< warning >}} Local JSON Web Key usage with the policy configuration value `jwksKeys[]` is recommended for test/debugging environments only. For production environments, `jwksURI` should be used for remote JSON Web Key retrieval. {{< /warning >}} +{{< note >}} While all request body configuration values are presented in the request body structure example below, not all configuration values are compatible. See the configuration value description table for further information. {{< /note >}} + +```json +{ + "policies": { + "oauth2-jwt-assertion": [ + { + "action": { + "jwksURI": "https://idp.io:8443/oauth/certs", + "cacheKeysDuration": "12h", + "jwksKeys": [ + { + "k": "bXlzZWNyZXQ", + "kid": "0001", + "kty": "oct" + } + ], + "tokenName": "Authorization", + "tokenSuppliedIn": "HEADER", + "errorReturnConditions": { + "notSupplied": { + "returnCode": 401 + }, + "noMatch": { + "returnCode": 403 + } + } + } + } + ] + } +} +``` + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values | Description | Required | Default value | +|---|---|---|---|---|---| +| `jwksURI` | string | Example:
    `https://idp.io:8443/oauth/certs` | URI endpoint that contains public keys used to verify the JWT signature.

    Not required if `jwksKeys[]` is populated. | Semi-optional | N/A | +| `cacheKeysDuration` | string | Example: `12h` | Specifies how long the keys will be cached. Keys will be refreshed from the URI endpoint after the duration.

    Only valid for `jwksURI`, not applicable for `jwksKeys[]`. Follows [NGINX configuration time measurement](http://nginx.org/en/docs/syntax.html) units syntax. | No | `"12h"` | +| `jwksKeys[]` | array of JSON Web Keys | Example in policy request body. | Keys to be used to verify JWT signatures. User should supply key data in valid JSON Web Key format. See related standards for [JWK](https://datatracker.ietf.org/doc/html/rfc7517), [JWK Set Format](https://datatracker.ietf.org/doc/html/rfc7517#section-5), and the [jwksKeys parameter](https://datatracker.ietf.org/doc/html/rfc7517#section-5.1).

    Not required if `jwksURI` is populated. | Semi-optional | N/A | +| `tokenName` | string | Example: `Authorization` | The name of the header or query parameter where the JWT will be located in the API request.

    In the default case of the `Authorization` header, the JWT token is required to adhere to the [Bearer Token usage](https://www.rfc-editor.org/rfc/rfc6750) standard.

    Example: `Authorization: Bearer {JWT}` where `{JWT}` is the Base64 encoded Client JWT. | No | `"Authorization"` | +| `tokenSuppliedIn` | string | One of: [`"HEADER"`, `"QUERY"`] | Specifies where the access token is supplied in the incoming user request. | No | `"HEADER"` | +| `errorReturnConditions`
    `.notSupplied`
    `.returnCode` | int | In range `400-599` | The error code that is returned from the API Proxy when a JWT is not supplied. | No | 401 | +| `errorReturnConditions`
    `.noMatch`
    `.returnCode` | int | In range `400-599` | The error code that is returned from the API Proxy when an invalid JWT is supplied. | No | 403 | + +{{< /bootstrap-table >}} + + +{{%/tab%}} +{{%tab name="UI"%}} + +1. In the API Connectivity Manager user interface, go to **Services > \{your workspace}**, where "your workspace" is the workspace that contains the API Proxy. +2. Select **Edit Proxy** from the **Actions** menu for the desired API Proxy. +3. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **JSON Web Token Assertion**. +4. Choose the **JSON Web Key Set (JWKS) source**, for remote JWKS select **Enter a URI**, for local JWKS select **Enter a JWKS**. + - For JWKS Uri enter the JWKS URI as **URI location** and specify for how long the API Proxy should **cache the keys**, set to **0** to disable. + - For JWKS add an array of JSON Web Keys in JSON Web Key Set format. See related standards for [JWK](https://datatracker.ietf.org/doc/html/rfc7517), [JWK Set Format](https://datatracker.ietf.org/doc/html/rfc7517#section-5), and the [Keys](https://datatracker.ietf.org/doc/html/rfc7517#section-5.1) parameter. Example usage: + + ```json + { + "keys": [ + { + "k": "bXlzZWNyZXQ", + "kid": "0001", + "kty": "oct" + } + ] + } + ``` + +5. Specify **how the token is presented** in the request, either in the request **Headers** or as a **Query** parameter.. +6. Set custom error return code conditions if an JWT is **not supplied** or **validation fails**. +7. Select **Add** to apply the OAuth2 JWT Assertion policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. + +{{%/tab%}} +{{
    }} + +## Related Links + +- [NGINX Blog: Authenticating API Clients with JWT and NGINX Plus](https://www.nginx.com/blog/authenticating-api-clients-jwt-nginx-plus/#Configuring-NGINX Plus-as-an-Authenticating-API-Gateway) +- [[RFC-6749] The OAuth 2.0 Authorization Framework](https://www.rfc-editor.org/rfc/rfc6749) +- [[RFC-6750] The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://www.rfc-editor.org/rfc/rfc6750) +- [[RFC-7517] JSON Web Key (JWK)](https://datatracker.ietf.org/doc/html/rfc7517) +- [[RFC-7519] JSON Web Token (JWT)](https://datatracker.ietf.org/doc/html/rfc7519) +- [[RFC-7521] Assertion Framework for OAuth 2.0 Client Authentication and Authorization Grants](https://www.rfc-editor.org/rfc/rfc7521) +- [[RFC-7523] JSON Web Token (JWT) Profile for OAuth 2.0 Client Authentication and Authorization Grants](https://www.rfc-editor.org/rfc/rfc7523) diff --git a/content/nms/acm/how-to/policies/log-format.md b/content/nms/acm/how-to/policies/log-format.md new file mode 100644 index 000000000..6933d0cba --- /dev/null +++ b/content/nms/acm/how-to/policies/log-format.md @@ -0,0 +1,162 @@ +--- +description: As an Infrastructure Administrator, use this guide to implement a standard + log format for all environments hosting APIs. +docs: DOCS-1127 +doctypes: +- API Connectivity Manager +- api management +- task +- reference +tags: +- docs +toc: true +weight: 700 +title: Log Format +--- + +## Overview + +{{< include "acm/how-to/policies-intro" >}} + +--- + +## About the Policy + +The Log Format policy enables Infrastructure Admins to set the format for access logs. Detailed access logs are generated in either JSON (default) or Syslog format and are applied to new environments automatically. This policy can be customized to filter log content, adjust log severity levels, and designate log destinations. 
+ +### Intended Audience + +{{< include "acm/how-to/policies/infra-admin-persona.md">}} + +--- + +## Workflow for Applying Policy + +To apply the policy or make changes to it, here's what you need to do: + +- [Edit an existing environment or create a new one]({{< relref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#add-environment" >}}). +- Check the advanced settings for the environment to see if the policy has been applied. +- Edit the policy to make changes for each environment. Save and publish the changes. + +--- + +## Policy Settings {#policy-settings} + +The following table lists the configurable settings and their default values for the policy. + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values |
    Description
    | Required | Default value | +|----------------------------------------------|-------------|--------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|--------------------------------------------------------------| +| `type` | string | One of:
    `[JSON,`
    `NATIVE]` | The access logs can be created in either JSON or native NGINX log format (Syslog). | Yes | `JSON` | +| `logFormat.include` | string/enum | One or more of:
    `["BASIC",`
    `"INGRESS",`
    `"BACKEND",`
    `"RESPONSE"]` | Specifies what information should be logged. | No | `["BASIC",`
    `"INGRESS",`
    `"BACKEND",`
    `"RESPONSE"]` | +| `logFormat.variables` | string/enum | `List of values` | Only variables included in this array will be logged; everything else will be hidden. | No | `Empty list []` | +| `errorLogSeverity` | string | One of:
    `[DEBUG,`
    `INFO,`
    `NOTICE,`
    `WARN,`
    `ERROR,`
    `CRIT,`
    `ALERT,`
    `EMERG]` | The minimum severity level of errors that will be logged. | No | `WARN` | +| `logDestination.type` | string/enum | One of:
    `["FILE",`
    `"SYSLOG"]` | The destination for the log output, either a file or syslog. | Yes | `FILE` | +| `logDestination.`
    `accessLogFileLocation` | string | `/var/log/nginx` | The directory in which the access log file will be saved. The directory can be any valid UNIX filepath, with relative paths being relative to the default NGINX configuration directory (`/etc/nginx/`). | Yes | `/var/log/nginx` | +| `logDestination.`
    `errorLogFileLocation` | string | `/var/log/nginx` | The directory in which the error log file will be saved. This directory can be any valid UNIX filepath, with relative paths being relative to the default NGINX configuration directory (`/etc/nginx/`). | No | `/var/log/nginx` | +| `enablePrettyPrint` | boolean | `true`,
    `false` | This setting adds whitespace and indentation to make JSON logs more easily readable for humans. This setting is applicable only when the `type` is set to `JSON`. | No | `false` | + +{{< /bootstrap-table >}} + + +--- + +## Applying the Policy + +In API Connectivity Manager, when an Infrastructure Administrator creates an environment, the following log format policy is applied by default: + +- Logs are in JSON format +- Logs are written to file +- Logs are saved to `/var/log/nginx` + +If these default options don't meet your requirements, you can customize the policy to suit your specific needs. Refer to the [Policy Settings](#policy-settings) section for the configurable options. + +
    + +{{}} +{{%tab name="API"%}} + +
    + +{{}}{{< include "acm/how-to/access-acm-api.md" >}}{{}} + +To add the Log Format policy using the REST API, send an HTTP `POST` request to the Environments endpoint. + + +{{}} + +| Method | Endpoint | +|--------|-------------------------------------------------------| +| `POST` | `/infrastructure/workspaces/{workspace}/environments/{environment}` | + +{{}} + + +
    +JSON request + +```json +{ + "policies": { + "log-format": [ + { + "action": { + "enablePrettyPrint": false, + "errorLogSeverity": "WARN", + "logDestination": { + "type": "FILE", + "accessLogFileLocation": "/var/log/nginx/", + "errorLogFileLocation": "/var/log/nginx/" + }, + "logFormat": { + "include": [ + "BASIC", + "INGRESS", + "BACKEND", + "RESPONSE" + ], + "variables": [] + }, + "type": "JSON" + } + } + ] + } +} +``` + +This JSON example defines the log format policy for an environment: the error log severity level is set to `WARN`; the log file location is `/var/log/nginx/`; and the log format includes `BASIC`, `INGRESS`, `BACKEND`, and `RESPONSE` information without any variables specified to limit what is logged. The pretty print feature is disabled, and the log type is set to `JSON`. + +
    + +{{%/tab%}} +{{%tab name="UI"%}} + +
    + +To add the Log Format policy using the web interface: + +1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. +2. On the left menu, select **Infrastructure**. +3. On the *Workspaces* page, select a workspace in the list to which you'll add an environment. +4. Select **Add** to add the environment. +5. On the *Create Environment* form, complete the necessary fields: + + - **Name**: Enter a name for the environment. + - **Description**: Describe the environment in a few words. + - **Type**: Select whether this is a production environment or not. + - **API Gateways**: Enter the API Gateway's name and hostname. + +6. Select **Create**. +7. On the *Environment Created* confirmation page, select **Go to \**. +8. In the *API Gateways* list, select the **Actions** menu (represented by an ellipsis, `...`). Then select **Edit Advanced Config**. +9. On the left menu, select **Global Policies**. +10. In the list of Global Policies, the Log Format policy should be enabled by default. To edit the policy, select the ellipsis icon (`...`), then select **Edit Policy**. +11. Customize the policy settings to suit your requirements. Refer to the [Policy Settings](#policy-settings) section for an overview of the available options and their possible configurations. +12. Select **Save** to save the changes. +13. Select **Save and Submit** to publish the policy changes to the environment. + +{{%/tab%}} +{{
    }} diff --git a/content/nms/acm/how-to/policies/manage-policies.md b/content/nms/acm/how-to/policies/manage-policies.md new file mode 100644 index 000000000..5499ab67c --- /dev/null +++ b/content/nms/acm/how-to/policies/manage-policies.md @@ -0,0 +1,181 @@ +--- +description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to set + up policies. +docs: DOCS-925 +doctypes: +- task +tags: +- docs +toc: true +weight: 100 +title: How to Set Up Policies +--- + +{{< shortversions "1.1.0" "latest" "acmvers" >}} + +## Overview + +{{< include "acm/how-to/policies-intro.md" >}} + +--- + +### Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- API Connectivity Manager is installed, licensed, and running. +- You have one or more Environments with [API Gateways]({{< relref "/nms/acm/getting-started/add-api-gateway" >}}) or [Developer Portals]({{< relref "/nms/acm/getting-started/add-devportal" >}}). + +### How to Access the User Interface + +{{< include "acm/how-to/access-acm-ui" >}} + +--- + +## Set Up Global Policies + +Global Policies are configured at the environment level and apply to all clusters and proxies within the environment. + +{{< include "acm/about/global-policies.md" >}} + +
    + +To manage Global Policies, take the steps below: + +1. In the API Connectivity Manager user interface, go to **Infrastructure > Workspaces > Environments**. +2. Select the Environment that holds the cluster that you want to configure, then select the **Cluster** name. +3. Select the **Manage** icon for the cluster that you want to configure. +4. Select the **Global Policies** tab. +5. [**Add**](#add-cluster-policy), [**Edit**](#edit-cluster-policy), or [**Remove**](#remove-cluster-policy) as desired. +6. **Save and Submit** your changes. + +### Add a Policy {#add-cluster-policy} + +Take the steps in this section to add a new policy to a cluster. + +1. Go to **Manage > Global Policies** for the cluster. +1. Select **Add Policy** from the policy's **Actions** menu. +1. Complete the form provided to configure the policy, then select **Add**. +1. **Save and Submit** your changes. + +### Edit a Policy {#edit-cluster-policy} + +To edit a policy, take the steps below. + +1. Go to **Manage > Global Policies** for the cluster. +1. Select **Edit Policy** from the policy's **Actions** menu. +1. Edit the policy as needed. +1. Select **Save** and **Save and Submit**. + +### Remove a Policy {#remove-cluster-policy} + +To remove a policy, take the steps below. + +1. Go to the **Global Policies** tab for the cluster. +1. Select **Remove Policy** from the policy's **Actions** menu. + +--- + +## Set Up API Proxy Policies + +{{< include "acm/about/api-proxy-policies.md" >}} + +
    + +Any Global Policies will automatically be applied when you add an API Proxy. +You can also configure any of the optional policies at the proxy level. + +To manage Proxy Policies, take the steps below. + +1. In the API Connectivity Manager user interface, go to **Services > Workspaces > Proxies**. +1. Select **Edit Proxy** from the **Actions** menu for the Proxy that you want to configure. +1. Select the **Policies** tab. +1. [**Add**](#add-proxy-policy), [**Edit**](#edit-proxy-policy), or [**Remove**](#remove-proxy-policy) as desired. +1. **Save and Publish** your changes. + +### Add a Policy {#add-proxy-policy} + +Take the steps in this section to add a new policy to a cluster. + +1. Go to **Edit Proxy > Policies**. +1. Select **Add Policy** from the policy's Actions menu. +1. Complete the form to configure the policy, then select the **Add** button. +1. **Save and Submit** your changes. + +### Edit a Policy {#edit-proxy-policy} + +Take the steps below to edit a policy. + +1. Go to **Edit Proxy > Policies**. +1. Select **Edit Policy** from the policy's Actions menu. +1. Edit the policy as needed. +1. Select **Save**, then **Save and Publish**. + +### Remove a Policy {#remove-proxy-policy} + +To remove a policy, take the steps below. + +1. Go to **Edit Proxy > Policies**. +1. Select **Remove Policy** from the policy's Actions menu. + +--- + +## Set Up Cluster Policies + +Cluster Policies are applied to all the proxies belonging to the desired cluster. In other words, these policies are applied to a cluster of F5 NGINX Plus instances which can have one or more API Gateways and Developer Portals deployed on them. + +The following table shows the available Cluster Policies you can use when creating a new cluster. + +
    + +**Legend:** + +- = Supported +- = Applied by default + +{{}} + +| Policy Name | HTTP Environment | gRPC Environment | Applied On | Description | +|-------------------------------------------------------------------|-------------------------------------------------|-------------------------------------------------|-----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [Cluster Wide Config Setting]({{< relref "/nms/acm/how-to/policies/cluster-wide-config.md" >}}) | | | inbound | Fine tune the settings to speed up data processing and improve the performance of the API proxy for large number of connections. When applied, the settings are applicable to all the instances in a proxy cluster. If the proxy cluster is shared between environments, the changes made in any environment will be reflected in all the other environments. | +| [Cluster Zone Sync]({{< relref "/nms/acm/how-to/policies/cluster-zone-sync.md" >}}) | | | inbound | Enables runtime state sharing between the instances belonging to a proxy cluster. Options configured through this policy affect other policies such as rate limit and OIDC. This policy is applied to all the instances in a proxy cluster. If the proxy cluster is shared between environments, any changes made to this policy will affect all the other environments. | + +{{}} + +
    + +To manage Cluster Policies, take the steps below: + +1. In the API Connectivity Manager user interface, go to **Infrastructure > Workspaces > Environments**. +2. Select the Environment that holds the cluster that you want to configure, then select the **Cluster** name. +3. Select the **Manage** icon for the cluster that you want to configure. +4. Select the **Cluster Policies** tab. +5. [**Add**](#add-cluster-policy), [**Edit**](#edit-cluster-policy), or [**Remove**](#remove-cluster-policy) as desired. +6. **Save and Submit** your changes. + +### Add a Policy {#add-cluster-policy} + +Take the steps in this section to add a new policy to a cluster. + +1. Go to **Manage > Cluster Policies** for the cluster. +1. Select **Add Policy** from the policy's **Actions** menu. +1. Complete the form provided to configure the policy, then select **Add**. +1. **Save and Submit** your changes. + +### Edit a Policy {#edit-cluster-policy} + +To edit a policy, take the steps below. + +1. Go to **Manage > Cluster Policies** for the cluster. +1. Select **Edit Policy** from the policy's **Actions** menu. +1. Edit the policy as needed. +1. Select **Save** and **Save and Submit**. + +### Remove a Policy {#remove-cluster-policy} + +To remove a policy, take the steps below. + +1. Go to the **Cluster Policies** tab for the cluster. +1. Select **Remove Policy** from the policy's **Actions** menu. + +--- diff --git a/content/nms/acm/how-to/policies/openID-connect.md b/content/nms/acm/how-to/policies/openID-connect.md new file mode 100644 index 000000000..c35439606 --- /dev/null +++ b/content/nms/acm/how-to/policies/openID-connect.md @@ -0,0 +1,433 @@ +--- +description: As an Infrastructure Administrator, use this guide to configure OpenID + Connect policy to enable Single Sign On for the gateways. 
+docs: DOCS-1134 +doctypes: +- API Connectivity Manager +- API management +- task +- reference +tags: +- docs +title: OpenID Connect Relying Party Policy +toc: true +weight: 910 +title: OpenID Connect +--- + +--- + +## Overview + +{{< include "acm/how-to/policies-intro" >}} + +--- + +## About OpenID Connect Policy + +OpenID Connect (OIDC) builds on OAuth 2.0 to offer an identity layer and a unified authentication process for securing APIs, native apps, and web applications. Clients can authenticate an end-user's identity by using an Authorization Server. End-user information is communicated using claims in a security token called an identity token. + +The OpenID Connect policy for API Connectivity Manager provides users with a convenient and secure single sign-on experience, allowing them to log in to multiple OAuth-enabled applications with a single set of credentials. This policy can be easily integrated with any compatible identity provider, providing single sign-on access to both API gateways and Developer Portals. + +### Intended Audience + +{{< include "acm/how-to/policies/api-owner-persona.md">}} + +--- + +## Before You Begin + +Before configuring API gateways and Developer Portals as OpenID Connect relying parties (RPs), you need to gather the necessary Identity provider (IDP) details: + +- IDP's well-known endpoints +- Client ID +- Client Secret (needed depending on the OAuth flow) + +{{< note >}} + +The Developer Portal supports both PCKE and AuthCode [authorization code flows](https://auth0.com/docs/get-started/authentication-and-authorization-flow/authorization-code-flow). + +{{< /note >}} + +--- + +## Workflow for Applying Policy + +To apply the OpenID Connect (OIDC) policy or make changes to it, here's what you need to do: + +- [Edit an existing environment or create a new one]({{< relref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#add-environment" >}}). +- Select the cluster on which to apply the policy. 
+- Check the advanced settings to see if the policy has been applied. +- Edit the policy as needed. +- Save and publish the changes. + +--- + +## Policy Settings + + +{{< bootstrap-table "table table-striped table-bordered" >}} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    FieldTypePossible Values
    Description
    RequiredDefault
    authFlowTypestringAUTHCODE
    PKCE
    PKCE is an OAuth 2.0 security extension for public clients (for example, mobile apps) that prevents a malicious application on the same device from intercepting the authorization code.NoAUTHCODE
    authorizationEndpointstringExample:
    https://accounts.google.com/o/oauth2/v2/auth
    URL of the IdP's OAuth 2.0 Authorization Endpoint.YesN/A
    authzParams.keystringBetween 1 and 128 charactersNoN/A
    authzParams.paramTypestringQUERY, PATH, HEADERNoN/A
    authzParams.valuestringBetween 1 and 128 charactersNoN/A
    errorReturnConditions.
    noMatch.
    returnCode
    integerIn range 100-599The error code that needs to be used by the nginx data-plane when there is no match for the supplied client ID.No403
    errorReturnConditions.
    notSupplied.
    returnCode
    integerIn range 100-599The error code that needs to be used by the nginx data-plane when a client ID is not supplied.No401
    jwksURIstringExample:
    https://www.googleapis.com/oauth2/v3/certs
    YesN/A
    logOffEndpointstringExample:
    https://oauth2.googleapis.com/revoke
    YesN/A
    logOutParams.keystringBetween 1 and 128 charactersNoN/A
    logOutParams.paramTypestringQUERY, PATH, HEADERNoN/A
    logOutParams.valuestringBetween 1 and 512 charactersNoN/A
    resolver.enableIPv6booleantrue/falseNofalse
    resolver.servers.hostnamestringBetween 1 and 253 charactersYesN/A
    resolver.servers.portintegerIn range 1-65535Yes80
    resolver.timeoutstringExample: 30s
    Between 2 and 14 characters
    No30s
    resolver.validstringExample: 24s
    Between 2 and 14 characters
    No30s
    returnTokenToClientOnLoginstringid_token, noneOptionally return token as a query param to the app after successful login.NoN/A
    tokenEndpointstringExample:
    https://oauth2.googleapis.com/token
    URL of the IdP's OAuth 2.0 Token Endpoint.YesN/A
    tokenParams.keystringBetween 1 and 128 charactersNoN/A
    tokenParams.paramTypestringQUERY, PATH, HEADERNoN/A
    tokenParams.valuestringBetween 1 and 512 charactersNoN/A
    uris.loginURIstringExample: /loginThis location is called by frontend for logging-in IDP using OpenID Connect.NoN/A
    uris.logoutURIstringExample: /logoutThis location is called by UI to handle OIDC logout with IDP as per: https://openid.net/specs/openid-connect-rpinitiated-1_0.html#RPLogoutNoN/A
    uris.redirectURIstringExample: /_codexchThis location is called by the IdP after successful authentication.NoN/A
    uris.userInfoURIstringExample: /userinfoThis location is called by frontend to retrieve user info via the IDP.NoN/A
    userInfoEndpointstringExample:
    https://openidconnect.googleapis.com/v1/userinfo
    URL of the IdP's UserInfo Endpoint.YesN/A
    userRegistrationstringUser registration URL; can be used to specify customer or workforce registration URLs.NoN/A
    wellKnownEndpointstringExample:
    https://accounts.google.com/.well-known/openid-configuration
    OIDC .well-known configuration endpoint. The well-known endpoint returns OpenID Connect metadata about the authorization server.NoN/A
    + + +{{}} + + +--- + +You can set up an OIDC policy by using either the web interface or the REST API. + +## Applying the Policy + +{{}} + +{{%tab name="API"%}} + +{{}}{{< include "acm/how-to/access-acm-api.md" >}}{{}} + +To apply the OpenID Connect policy using the REST API, send an HTTP `POST` request to the Environments endpoint. + + +{{}} + +| Method | Endpoint | +|--------|-----------------------------------------------------------------------| +| POST | `/api/v1/infrastructure/workspaces/{proxyWorkspaceName}/environments` | + +{{}} + + + +JSON request + +```json +{ + "policies": { + "oidc-authz": [ + { + "action": { + "authFlowType": "PKCE", + "authorizationEndpoint": "https:///v1/Authorize", + "jwksURI": "https:///v1/keys", + "logOffEndpoint": "https:///v1/logout", + "tokenEndpoint": "https:///v1/Token", + "userInfoEndpoint": "https:///v1/userinfo", + "uris": { + "loginURI": "/login", + "logoutURI": "/logout", + "redirectURI": "/_codexch", + "userInfoURI": "/userinfo" + }, + "returnTokenToClientOnLogin": "none", + "forwardTokenToBackend": "access_token", + "errorReturnConditions": { + "noMatch": { + "returnCode": 403 + }, + "notSupplied": { + "returnCode": 401 + } + } + }, + "data": [ + { + "clientID": "myclientID1234", + "scopes": "email+openid+profile" + } + ] + } + ] + } +} +``` + +This JSON defines an OpenID Connect (OIDC) authorization policy. It specifies the URL endpoints for the authorization, token, and user info services, as well as the URIs for login, logout, and redirect activities. It also defines that the client ID and scopes are "myclientID1234" and "email+openid+profile", respectively. Additionally, it specifies how to handle errors, such as returning a 403 code when there is no match and a 401 code when the data is not supplied. + +
    + +{{%/tab%}} + +{{%tab name="UI"%}} + +1. {{< include "acm/webui-acm-login.md" >}} +1. On the left menu, select **Infrastructure**. +1. From the list of workspaces, select the workspace for your cluster's environment. +1. From the list of environments, select the environment for your cluster. +1. From the list of clusters, locate the cluster you want to add the policy to. On the **Actions** menu (represented by an ellipsis, `...`), select **Edit Advanced Config**. +1. On the left menu, select **Global Policies**. +1. Locate the **OpenID Connect Relying Party** policy in the list of policies. On the **Actions** menu (represented by an ellipsis, `...`), select **Add Policy**. +1. In the API Connectivity Manager user interface, go to **Infrastructure > Workspaces > Environments** and select the **Edit Advanced Config** from the **Actions** menu for the cluster you want to set up. +1. Select the **Global Policies** tab. +1. For **OpenID Connect Relying Party** select **Add Policy** from the policy's **Actions** menu. +1. Update **Application Settings**. + +{{< include "acm/how-to/update-application-settings.md" >}} + +12. Update **Authorization Server Settings** + +{{< include "acm/how-to/update-authorization-server-settings.md" >}} + +13. Update **General Settings** + +{{< include "acm/how-to/update-general-settings.md" >}} + +14. Update **Custom Error Handling**. + + You can customize how the proxy should handle the following error conditions: + +- when Client ID is not supplied +- when there is no match for the Client ID + + Specify the HTTP error code in the box next to the error condition. The specified error code will be displayed when the related error condition is true. + +15. Select **Add**. +1. Select **Save and Submit** your changes. 
+ +{{%/tab%}} + +{{}} diff --git a/content/nms/acm/how-to/policies/proxy-cache.md b/content/nms/acm/how-to/policies/proxy-cache.md new file mode 100644 index 000000000..db66846da --- /dev/null +++ b/content/nms/acm/how-to/policies/proxy-cache.md @@ -0,0 +1,166 @@ +--- +description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to enable + and configure caching to improve the performance of your API gateway proxy. +docs: DOCS-1190 +doctypes: +- reference +tags: +- docs +title: Proxy Cache +toc: true +weight: null +--- + +## Overview + +{{< include "acm/how-to/policies-proxy-intro.md" >}} + +--- + +## About the Policy + +Enable and configure caching to improve the performance of your API Gateways, speed up delivery to clients, and reduce the load on the backend API runtimes. When caching is enabled, the API Gateway saves responses to a disk cache and uses them to respond to clients without having to proxy requests for the same content every time. + +By default, the API Gateway caches all responses to requests made with the HTTP GET and HEAD methods the first time such responses are received from a proxied server. The API Gateway uses the request string as a request's key (identifier). If a request has the same key as a cached response, the API Gateway sends the cached response to the client. You can customize and control which responses are cached. + +Fine-tune the cache for further improvements in performance by instructing it to use conditional GET requests when refreshing content from origin servers, set a minimum request number to cache content, enable background update, and cache lock. + +### Intended Audience + +{{< include "acm/how-to/policies/api-owner-persona.md">}} + +--- + +## How to apply the policy + +- Create an API proxy or edit an existing one. +- Check the advanced settings for the API proxy to see if the policy has been applied. +- Edit the policy to make changes for each API proxy. Save and publish the changes. 
+ +--- + +## Policy Settings + +The following table lists the configurable settings and their default values for the policy. + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Datatype | Possible Values | Description | Required | Default | +|--------------|----------|---------------------|----------------------------------------------------|----------|-----------------------| +| `httpMethods` | array | GET, HEAD, POST | HTTP request methods to cache. | No | GET, HEAD | +| `cacheKey` | array | host, requestURI, scheme, cookieJSessionID, cookieUser | Keys to be cached. 'host' is the name & port of the proxied server. 'requestURI' is the URI of the request. 'scheme' is the protocol used to access the resource on the proxied server. 'cookieJSessionID' is the cookie used for session management. 'cookieUser' is the cookie used for user management. | No | host, requestURI, scheme | +| `maxCacheSize` | string | 1K (available units - K, M, G) | Upper limit of the size of the cache. | No | 1G | +| `cacheValidTime` | string | 1s (available units - s, m, h) | Enforces an expiration for the cached data. | No | 10m | +| `minUseOfProxyToCache` | integer | 1 | Minimum number of client requests before caching is enabled. | No | 1 | +| `reValidate` | boolean | true/false | Enables revalidation of expired cache items using conditional GET requests with the If-Modified-Since and If-None-Match header fields. | No | false | +| `backgroundUpdate` | boolean | true/false | Enables delivery of stale content when clients request an item that is in the process of being updated from the origin server. All updates will be done in the background. The stale file is returned for all requests until the updated file is fully downloaded. | No | false | +| `stale.backendErrors` | array | error, timeout, invalid_header, updating | Determines in which cases a stale cached response can be used during communication with the proxied server. 
| No | |
+| `stale.backendErrorCodes` | array | 403, 404, 429, 500, 502, 503, 504 | Determines for which HTTP status codes a stale cached response can be used during communication with the proxied server. | No | |
+| `cacheLock.enabled` | boolean | true/false | When enabled, only one request at a time will be allowed to populate a new cache element identified according to the cacheKey by passing a request to a proxied server. | No | false |
+| `cacheLock.age` | string | 1s (available units - s, m, h) | If the last request passed to the proxied server for populating a new cache element has not completed for the specified time, one more request may be passed to the proxied server. | No | 5s |
+| `cacheLock.timeout` | string | 1s (available units - s, m, h) | Sets a timeout for cacheLock; When the time expires, the request will be passed to the proxied server, however, the response will not be cached. | No | 5s |
+
+{{< /bootstrap-table >}}
+
+
+---
+
+## Adding the Policy
+
+{{}}
+
+{{%tab name="API"%}}
+
+{{}}{{< include "acm/how-to/access-acm-api.md" >}}{{}}
+
+To apply the Proxy Cache policy using the REST API, send an HTTP `POST` request to the Proxies endpoint.
+
+
+{{< bootstrap-table "table table-striped table-bordered" >}}
+
+| Method | Endpoint |
+|--------|---------------------|
+| `POST` | `/services/workspaces/{workspaceName}/proxies` |
+
+{{}}
+
+
    +JSON request + +``` json +{ + "policies": { + "proxy-cache": [ + { + "action": { + "httpMethods": [ + "GET", + "HEAD", + "POST" + ], + "cacheKey": [ + "host", + "requestURI", + "scheme", + "cookieJSessionID", + "cookieUser" + ], + "maxCacheSize": "1G", + "cacheValidTime": "10m", + "minUseOfProxyToCache": 1, + "reValidate": true, + "backgroundUpdate": true, + "stale": { + "backendErrors": [ + "error", + "timeout", + "invalid_header", + "updating" + ], + "backendErrorCodes": [ + 403, + 404, + 429, + 500, + 502, + 503, + 504 + ] + }, + "cacheLock": { + "enabled": true, + "age": "5s", + "timeout": "5s" + } + } + } + ] + } +} +``` + +
    + +{{%/tab%}} + +{{%tab name="UI"%}} + +To apply the Proxy Cache policy using the web interface: + +1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. +2. On the left menu, select **Services**. +3. Select a workspace in the list that contains the API Proxy you want to update. +4. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. +5. On the left menu, select **API Proxy > Advanced > Policies**. +6. On the *Advanced > Policies* page, on the **API Proxy** tab, locate **Proxy Cache**. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Add Policy**. +7. Modify the configuration as needed. +8. Select **Add** to apply the policy to the API Proxy. +9. Select **Save and Publish** to deploy the configuration to the API Proxy. + +{{%/tab%}} + +{{
    }} + +--- diff --git a/content/nms/acm/how-to/policies/proxy-request-headers.md b/content/nms/acm/how-to/policies/proxy-request-headers.md new file mode 100644 index 000000000..148d6a3e4 --- /dev/null +++ b/content/nms/acm/how-to/policies/proxy-request-headers.md @@ -0,0 +1,187 @@ +--- +description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to set + request headers to send to your backend services. +docs: DOCS-1129 +doctypes: +- API Connectivity Manager +- api management +- reference +tags: +- docs +toc: true +weight: 1100 +title: Proxy Request Headers +--- + +## Overview + +{{< include "acm/how-to/policies-proxy-intro" >}} + +--- + +## About the Policy + +The Proxy Request Headers policy allows users to pass default and custom request headers to backend services. + +This policy is enabled by default when you [publish an API Proxy]({{< relref "/nms/acm/getting-started/publish-api-proxy" >}}). + +### Intended Audience + +{{< include "acm/how-to/policies/api-owner-persona.md">}} + +--- + +## Before You Begin + +To complete the steps in this guide, you need the following: + +- API Connectivity Manager is installed, licensed, and running. +- An [API gateway environment]({{< relref "/nms/acm/getting-started/add-api-gateway" >}}) +- A [published API Gateway]({{< relref "/nms/acm/getting-started/publish-api-proxy" >}}) + +--- + +## Policy Settings {#policy-settings} + +The following table lists the configurable settings and their default values for the policy. + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values | Description | Required | Default | +|-------------------------------------------------|----------|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------| +| `proxyDefaultHeadersToBackend` | boolean | `true`,
    `false` |

    When set to `true`, the default headers are passed to backend services.

    For more information, refer to the [Default Headers]({{< relref "#default-headers" >}}) section.

    | No | `True` | +| `proxyCustomHeadersToBackend.`
    `key` | string | Example: `my-header` | The name of the HTTP header. | Yes | N/A | +| `proxyCustomHeadersToBackend.`
    `value` | string | Example: `var.test` |

    The value of the HTTP header.

    For more information, refer to the [Header Value Prefixes]({{< relref "#value-prefixes" >}}) section.

    | Yes | N/A | +| `proxyCustomHeadersToBackend.`
    `isSensitive` | boolean | `true`,
`false` | When set to `true`, the header will not appear in logs. | No | `False` |
+
+{{< /bootstrap-table >}}
+
+
+### Default Headers {#default-headers}
+
+{{}}When `proxyDefaultHeadersToBackend` is `true`, the following headers are applied.{{}}
+
+
+{{< bootstrap-table "table table-striped table-bordered" >}}
+
+| Header | Description |
+|-------------------|------------------------------------------------------------|
+| `Accept-Encoding` | Set to an empty string. |
+| `Host` | Set to the IP address of the machine proxying the request. |
+| `X-Real-IP` | Set to the client's IP address. |
+| `Connection` | Set to an empty string. |
+
+{{< /bootstrap-table >}}
+
+
+### Header Value Prefixes {#value-prefixes}
+
+{{}}When adding a custom header to `proxyCustomHeadersToBackend`, include one of the following prefixes for the `value` setting.{{}}
+
+
+{{< bootstrap-table "table table-striped table-bordered" >}}
+
+| Prefix | Example | Description |
+|---------------|----------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `var` | var.content_length | Pass a [valid NGINX variable](http://nginx.org/en/docs/varindex.html). |
+| `header` | header.referrer | Pass a header from the client request. |
+| `client` | client.IP | Pass a value from the client if a [Basic Auth]({{< relref "/nms/acm/how-to/policies/basic-authn" >}}) or [API Key]({{< relref "/nms/acm/how-to/policies/apikey-authn" >}}) policy has been configured. |
+| `stringValue` | stringValue.MyString | Pass a static string. |
+| `token` | token.sub | Pass a value from the JSON Web Token (JWT) if the [OAuth2 JWT Assertion]({{< relref "/nms/acm/how-to/policies/jwt-assertion" >}}) policy has been configured. |
+
+{{< /bootstrap-table >}}
+
+
+---
+
+## Applying the Policy
+
+You can apply this policy using either the web interface or the REST API.
+
    + +{{}} +{{%tab name="API"%}} + +
    + +{{}}{{< include "acm/how-to/access-acm-api.md" >}}{{}} + +To apply the Proxy Request Headers policy using the REST API, send an HTTP `PUT` request to the Proxies endpoint. + + +{{}} + +| Method | Endpoint | +|--------|---------------------------------------------------------| +| `PUT` | `/services/workspaces/{SERVICE_WORKSPACE_NAME}/proxies` | + +{{}} + + +
    +JSON request + +```json +{ + "policies": { + "proxy-request-headers": [ + { + "action": { + "proxyHeaders": { + "proxyDefaultHeadersToBackend": true, + "proxyCustomHeadersToBackend": [ + { + "key": "my-custom-header", + "value": "stringValue.myValue", + "isSensitive": true + } + ] + } + } + } + ] + } +} +``` + +This JSON configures a policy for handling proxy request headers. It instructs the proxy to forward the default headers to the backend, and also to forward a custom header, `my-custom-header`, with a specific value, `stringValue.myValue`. The custom header is marked as sensitive, meaning it won't show up in the logs. + +
+
+{{%/tab%}}
+{{%tab name="UI"%}}
+
+1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**.
+2. On the left menu, select **Services**.
+3. Select a workspace in the list that contains the API Proxy you want to update.
+4. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**.
+5. On the left menu, select **API Proxy > Advanced > Policies**.
+6. On the *Advanced > Policies* page, on the **API Proxy** tab, locate **Proxy Request Headers**. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Policy**.
+7. Toggle **Set Default Headers** on or off to include default headers or not. This setting is enabled by default.
+8. To add custom headers, select **Add Custom Header**, then complete the necessary fields:
+
+   - **Header**: The name of the custom HTTP header.
+   - **Value**: The value of the custom HTTP header.
+
+     The value must include one of the following prefixes:
+
+     - `var.`
+     - `header.`
+     - `client.`
+     - `stringValue.`
+     - `token.`
+
+     For example, to pass a static string for the value, you might type `stringValue.MyString` for the value.
+
+     To learn more about the prefix options and formatting requirements, refer to the [Header Value Prefixes](#value-prefixes) section.
+
+   - **Is Sensitive**: Turn on to prevent writing the custom header to logs.
+
+9. Select **Save** to apply the policy to the API Proxy.
+10. Select **Save and Publish** to deploy the configuration to the API Proxy.
+
+{{%/tab%}}
+{{
    }} + diff --git a/content/nms/acm/how-to/policies/proxy-response-headers.md b/content/nms/acm/how-to/policies/proxy-response-headers.md new file mode 100644 index 000000000..f2f03443c --- /dev/null +++ b/content/nms/acm/how-to/policies/proxy-response-headers.md @@ -0,0 +1,248 @@ +--- +description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to set + response headers to send to your clients. +docs: DOCS-1135 +doctypes: +- reference +tags: +- docs +title: Proxy Response Headers +toc: true +weight: 1150 +--- + +## Overview + +{{< include "acm/how-to/policies-intro" >}} + +## About the Policy + +Customize the Proxy Response Headers policy to include or exclude headers in the proxy response. By default, the standard headers are included in the response. In addition, you can specify whether the header is always included regardless of the response code. You can also add custom headers and values to include in the response. + +### Intended Audience + +{{< include "acm/how-to/policies/infra-admin-persona.md">}} + +--- + +## Workflow for Applying Policy + +To apply the policy or make changes to it, here’s what you need to do: + +- Create an environment or edit an existing one. +- Check the advanced settings for the environment to see if the policy has been applied. +- Edit the policy to make changes for each environment. Save and publish the changes. + +--- + +## Policy Settings + +The following table lists the configurable settings and their default values for the policy. + +### Standard Headers + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Datatype | Possible Values |
    Description
    | Required | Default | Always Include| +|--------------|----------|---------------------|----------------------------------------------------|----------|-----------------------|---------| +| `web-security-headers` | boolean | true/false | When set to true, the default headers are passed in proxy response. For more information, refer to the Web Security Headers section. | No | true | true | +| `latency-headers` | boolean | true/false | When set to true, the default headers are passed in proxy response. For more information, refer to the Latency Headers section. | No | true | false | +| `cache-headers` | boolean | true/false | When set to true, the default headers are passed in proxy response. For more information, refer to the Cache Headers section. | No | true | true | +| `client-headers` | boolean | true/false | When set to true, X-Client-Original-IP header is passed in proxy response. For more information, refer to the Client Headers section. | No | true | true | +| `hide-nginx-headers` | boolean | true/false | When set to true, nginx version is not passed in Server header in proxy response. For more information, refer to the Hide NGINX Headers section. | No | false | false | +| `correlation-id` | boolean | true/false | When set to true, the correlation id header is passed in proxy response. For more information, refer to the Correlation ID Headers section. | No | true | true | + +{{< /bootstrap-table >}} + + +### Web Security Headers + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Header | Example | Directive |
    Description
    | +|--------------|----------|---------------------|----------------------------------------------------| +| `Strict-Transport-Security` | Strict-Transport-Security: max-age=31536000; includeSubDomains | add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" [always] | HSTS response header informs browsers that the site should only be accessed using HTTPS, and that any future attempts to access it using HTTP should automatically be converted to HTTPS. | +| `X-Frame-Options` | X-Frame-Options: SAMEORIGIN | add_header X-Frame-Options SAMEORIGIN [always] | The X-Frame-Options HTTP response header can be used to indicate whether or not a browser should be allowed to render a page in iframe. Sites can use this to avoid click-jacking attacks, by ensuring that their content is not embedded into other sites. | +| `X-Content-Type-Options` | X-Content-Type-Options: nosniff | add_header X-Content-Type-Options nosniff [always] | The X-Content-Type-Options response HTTP header is a marker used by the server to indicate that the MIME types advertised in the Content-Type headers should be followed and not be changed. The header allows you to avoid MIME type sniffing by saying that the MIME types are deliberately configured. | +| `X-Xss-Protection` | X-Xss-Protection: 1; mode=block | add_header X-Xss-Protection "1; mode=block" [always] | The HTTP X-XSS-Protection response header is a feature of Internet Explorer, Chrome and Safari that stops pages from loading when they detect reflected cross-site scripting (XSS) attacks. These protections are largely unnecessary in modern browsers when sites implement a strong Content-Security-Policy that disables the use of inline JavaScript ('unsafe-inline'). | + +{{< /bootstrap-table >}} + + +### Latency Headers + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Header | Example | Directive |
    Description
    | +|--------------|----------|---------------------|----------------------------------------------------| +| `X-Backend-Latency` | X-Backend-Latency: 0.744 | add_header X-Backend-Latency $upstream_header_time [always] | Backend/Upstream response time | +| `X-Total-Request-Response-Latency` | X-Total-Request-Response-Latency: 0.743 | add_header X-Total-Request-Response-Latency $request_time [always] | Request time | +| `X-Backend-Connection-Time` | X-Backend-Connection-Time: 0.433 | add_header X-Backend-Connection-Time $upstream_connect_time [always] | Backend/Upstream connect time | + + +{{< /bootstrap-table >}} + + +### Cache Headers + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Header | Example | Directive |
    Description
    | +|--------------|----------|---------------------|----------------------------------------------------| +| `Cache-Control` | Cache-Control: public, must-revalidate, proxy-revalidate | add_header Cache-Control "public, must-revalidate, proxy-revalidate" [always] | The Cache-Control HTTP header field holds directives (instructions) — in both requests and responses — that control caching in browsers and shared caches (e.g. Proxies, CDNs). | + +{{< /bootstrap-table >}} + + +### Client Headers + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Header | Example | Directive |
    Description
    | +|--------------|----------|---------------------|----------------------------------------------------| +| `X-Client-Original-IP` | X-Client-Original-IP: 172.10.10.10 | add_header X-Client-Original-IP $realip_remote_addr [always] | Client original IP. | + +{{< /bootstrap-table >}} + + +### Hide NGINX Headers + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Header | Example | Directive |
    Description
    | +|--------------|----------|---------------------|----------------------------------------------------| +| `Server` | Server: nginx | server_tokens off | NGINX version is not passed in Server header in proxy response. Server: nginx [nginx/1.23.2] | + + +{{< /bootstrap-table >}} + + +### Correlation ID Headers + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Header | Example | Directive |
    Description
    | +|--------------|----------|---------------------|----------------------------------------------------| +| `` | x-correlation-id: 26fd65ab0bbe36e546e3da14f4aea89f | add_header ` [always]` | There must also be a request-correlation-id policy that will tell you the header name that gets used. The correlation id value is usually the $request_id but there is logic that that can be overridden by a specific value in the request header itself. | + + +{{< /bootstrap-table >}} + + +### Custom Headers + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Header | Example | Directive |
    Description
    | +|--------------|----------|---------------------|----------------------------------------------------| +| `` | x-custom-header: 3da14f4aea89f | add_header ` [always]` | Add a custom header. | + + +{{< /bootstrap-table >}} + + +--- + +## Adding the Policy + +{{}} + +{{%tab name="API"%}} + +{{}}{{< include "acm/how-to/access-acm-api.md" >}}{{}} + +To create or update a Response Headers policy using the REST API, send an HTTP `PUT` request to the Environment endpoint. + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Method | Endpoint | +|--------|---------------------| +| `PUT` | `/infrastructure/workspaces/{workspace}/environments/{environment}` | + +{{}} + + +
    +JSON request + +``` json +{ + "policies": { + "proxy-response-headers": [ + { + "action": { + "config": [ + { + "always": true, + "enabled": true, + "name": "web-security-headers" + }, + { + "always": true, + "enabled": true, + "name": "correlation-id" + }, + { + "always": false, + "enabled": true, + "name": "latency-headers" + }, + { + "always": true, + "enabled": true, + "name": "cache-headers" + }, + { + "always": false, + "enabled": false, + "name": "hide-nginx-headers" + }, + { + "always": true, + "enabled": true, + "name": "client-headers" + } + ], + "customResponseHeaders": [ + { + "always": true, + "key": "x-custom-header", + "value": "3da14f4aea89f" + } + ] + } + } + ] + } +} +``` + +
    + +{{%/tab%}} + +{{%tab name="UI"%}} + +To create/update Response Headers policy using the web interface: + +1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. +2. On the left menu, select **Infrastructure**. +3. Choose the workspace that includes the environment for the cluster you want to add the policy to. +4. Select the environment for your cluster. +5. In the list of clusters, locate the cluster you want to add the policy to. On the **Actions** menu (represented by an ellipsis, ...), select **Edit Advanced Config**. +6. On the left menu, select **Global Policies**. +7. From the list of policies, locate the policy, then select **Add Policy** from the **Actions** menu (represented by an ellipsis, ...). +8. Select **Save and Submit** to deploy the configuration. + +{{%/tab%}} + +{{
    }} diff --git a/content/nms/acm/how-to/policies/rate-limit.md b/content/nms/acm/how-to/policies/rate-limit.md new file mode 100644 index 000000000..4bda85215 --- /dev/null +++ b/content/nms/acm/how-to/policies/rate-limit.md @@ -0,0 +1,150 @@ +--- +description: Learn how to use the F5 NGINX Management Suite API Connectivity Manager + Rate Limiting policy to protect backend servers. The Rate Limiting policy lets you + limit connections and the rate of requests based on request URI, client IP address, + or authenticated clients. +docs: DOCS-1251 +doctypes: +- API Connectivity Manager +- api management +- task +tags: +- docs +title: Rate Limiting +toc: true +weight: 1300 +--- + +## Overview + +{{< include "acm/how-to/policies-proxy-intro" >}} + +--- + +## About the Policy + +The Rate Limit policy can be used to throttle the number of requests in a time period that enter an application. +You can specify multiple rate limit stipulations with a single policy based on the **Request URI**, **Client IP address** or the **Authenticated Client ID**. +The policy can also specify the type of traffic shaping required to allow burst traffic or two-stage rate limiting. + +### Intended Audience + +This guide is meant for F5 NGINX Management Suite Administrators who can modify or create policies on an API Gateway Proxy. + +--- + +## Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- API Connectivity Manager is installed, licensed, and running. +- You have one or more Environments with an [API Gateway]({{< relref "/nms/acm/getting-started/add-api-gateway" >}}). +- You have published one or more [API Gateways]({{< relref "/nms/acm/getting-started/publish-api-proxy" >}}). 
+
+
+## Policy Settings
+
+
+{{< bootstrap-table "table table-striped table-bordered" >}}
+
+| Field | Type | Possible Values | Description | Required | Default Value |
+|------------------------|---------|--------------------------------|----------------------------------------------------------------------------------------------------------|----------|--------------------|
+| `returnCode` | int | In range `400-599` | The return code that is used when the total number of requests has been exceeded. | Yes | `429` |
+| `grpcStatusCode` | int | In range `400-599` | The return code that is used when the total number of requests has been exceeded. | No | `429` |
+| `limits.rate` | string | Example:
    `10r/s` | The total number of requests allowed over a given amount of time. | Yes | `10r/s` | +| `limits.rateLimitBy` | string | `uri`, `consumer`, `client.ip` | The value on which to apply the rate limiting on. | Yes | `client.ip` | +| `limits.zoneSize` | string | Example:
    `10M` | The size of the shared memory buffer for the proxy. | Yes | `10M` | +| `throttle.delay` | int | Example:
    `5` | The delay parameter defines the point at which, within the burst size, excessive requests are throttled. | No | `N/A` | +| `throttle.noDelay` | boolean | `true/false` | Decides if the request should be processed immediately or stored in buffer. | No | `N/A` | +| `throttle.burst` | int | Example:
`10` | Total number of requests that can be handled in a burst before rate limiting is exceeded. | No | `N/A` |
+
+
+{{< /bootstrap-table >}}
+
+
+---
+
+## Applying the Policy
+
+You can apply this policy using either the web interface or the REST API.
+
    + +{{}} + +{{%tab name="API"%}} + +Send a `POST` request to add the Rate limit policy to the API Proxy. + + +{{}} + +| Method | Endpoint | +|--------|---------------------------------------------------------------------| +| `POST` | `/services/workspaces//proxies` | + +{{}} + + +
+JSON request
+
+```json
+{
+  "policies": {
+    "rate-limit": [
+      {
+        "systemMetadata": {
+          "appliedOn": "inbound",
+          "context": "proxy"
+        },
+        "action": {
+          "limits": [
+            {
+              "rate": "10r/s",
+              "rateLimitBy": "client.ip",
+              "zoneSize": "10M"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
+```
+
+This JSON example defines a Rate Limit policy that limits each client IP address to 10 requests per second, using a 10 megabyte shared memory zone.
+
+
+{{%/tab%}}
+
+{{%tab name="UI"%}}
+
+To add a Rate Limit policy using the web interface:
+
+1. In the ACM user interface, go to **Services > \{your workspace}**, where "your workspace" is the workspace that contains the API Proxy.
+1. Select **Edit Proxy** from the **Actions** menu for the desired API Proxy.
+1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **Rate Limit**.
+1. Multiple Rate Limit stipulations can be added for a policy.
+1. Configure the associated **Key**, **Limit**, **Unit**, **Zone Size**, and **Zone size unit** for each stipulation.
+1. Optionally, you can customize the type of rate limiting that is applied to the policy. Choose one of the following three options:
+    1. **Buffer excess requests**: will allow bursts of requests to be stored in a buffer
+    1. **Buffer excess requests no delay**: will allow bursts of requests to get processed immediately while there is space in the buffer
+    1. **Throttle excess requests**: will enable two-stage rate limiting
+1. Set custom error return code conditions if rate limiting **is exceeded**.
+1. Select **Add** to apply the Rate Limit policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy.
+
+{{%/tab%}}
+
+{{
    }} + +--- + +## Common Use Cases +The following articles describe common use cases for rate limiting: + +1. [Rate Limiting with NGINX and NGINX Plus](https://www.nginx.com/blog/rate-limiting-nginx/) +1. [Deploying NGINX as an API Gateway, Part 2: Protecting Backend Services](https://www.nginx.com/blog/deploying-nginx-plus-as-an-api-gateway-part-2-protecting-backend-services/) diff --git a/content/nms/acm/how-to/policies/request-body-size-limit.md b/content/nms/acm/how-to/policies/request-body-size-limit.md new file mode 100644 index 000000000..7e35390a6 --- /dev/null +++ b/content/nms/acm/how-to/policies/request-body-size-limit.md @@ -0,0 +1,125 @@ +--- +description: Learn how to configure the Request Policy Size Limit policy to prevent + Denial of Service (DoS) and other types of attacks. +docs: DOCS-1122 +doctypes: +- task +tags: +- docs +title: Request Body Size Limit +toc: true +weight: 1200 +--- + +## Overview + +{{< include "acm/how-to/policies-intro" >}} + +--- + +## About the Policy + +The *Request Body Size Limit* policy, which by default is set to 1 MB, is applied to all API gateway proxy requests. If the request exceeds this limit, it will be blocked and an error code will be returned. You can adjust the limit to meet your requirements, or you can disable the policy completely by setting the max size to 0. + +### Intended Audience + +{{< include "acm/how-to/policies/infra-admin-persona.md">}} + +--- + +## Workflow for Applying Policy + +To apply the policy or make changes to it, here's what you need to do: + +- [Edit an existing environment or create a new one]({{< relref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#add-environment" >}}). +- Check the advanced settings for the environment to see if the policy has been applied. +- Edit the policy to make changes for each environment. Save and publish the changes. 
+ +--- + +## Policy Settings + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values | Description | Required | Default | +|--------------|----------|-----------------------|----------------------------------------------------------------------------------------------------------------------------|----------|---------------------| +| `size` | string | Example:
    `1M` or `1K` |

    Sets the maximum body size for client requests.

    Megabytes, `M`, and Kilobytes, `K`, are the accepted units.

    | No | `1M` | +| `returnCode` | integer | In range:
    `400-599` |

    The error code that is returned to the client when the size of a request exceeds the configured value.

    The default error code is `413: Request Entity Too Large`.

    | No | `413` | + +{{< /bootstrap-table >}} + + +--- + +## Applying the Policy + +You can apply this policy using either the web interface or the REST API. + +
    + +{{}} + +{{%tab name="API"%}} + +{{}}{{< include "acm/how-to/access-acm-api.md" >}}{{}} + +To add the Request Body Size Limit policy using the REST API, send an HTTP `POST` request to the Environments endpoint. + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Method | Endpoint | +|--------|-------------------------------------------------------------------------------------| +| `POST` | `/infrastructure/workspaces/{workspace}/environments/{environment}` | + +{{}} + + +
    +JSON request + +``` json +{ + "policies": { + "request-body-size-limit": [ + { + "action": { + "returnCode": 413, + "size": "1M" + } + } + ] + } +} +``` + +
    + +This example Request Body Size Limit policy rejects requests exceeding one megabyte and returns error code `413`. + +
    + +{{%/tab%}} + +{{%tab name="UI"%}} + +To add a Request Body Size Limit policy using the web interface: + +1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. +2. On the left menu, select **Infrastructure**. +3. Choose the workspace that contains your cluster's environment from the list of workspaces. +4. In the **Environments** section, select the environment name for your cluster. +5. In the list of clusters, locate the cluster you want to add the policy to. On the **Actions** menu (represented by an ellipsis, `...`), select **Edit Advanced Config**. +6. On the left menu, select **Global Policies**. +7. Locate the **Request Body Size Limit** policy in the list of policies. On the **Actions** menu (represented by an ellipsis, `...`), select **Add Policy**. +8. On the **Request Body Size Limit** form, complete the necessary fields: + + - **Error code**: Specify the error code to return when a request exceeds the maximum size. The default is `413`. + - **Request body size**: Enter the maximum body size in megabytes or kilobytes. The default is 1 megabyte. +9. Select **Add** to apply the policy to the cluster. +10. Select **Save and Submit** to deploy the configuration. + +{{%/tab%}} + +{{
    }} diff --git a/content/nms/acm/how-to/policies/request-correlation-id.md b/content/nms/acm/how-to/policies/request-correlation-id.md new file mode 100644 index 000000000..5a4a96120 --- /dev/null +++ b/content/nms/acm/how-to/policies/request-correlation-id.md @@ -0,0 +1,122 @@ +--- +description: Learn how to use API Connectivity Manager's Request Correlation ID policy + to add a unique identifier to each request entering your app, which you can use + to trace end-to-end transactions in a distributed system. +docs: DOCS-1120 +doctypes: +- API Connectivity Manager +- api management +- task +tags: +- docs +title: Request Correlation ID +toc: true +weight: 1300 +--- + +## Overview + +{{< include "acm/how-to/policies-intro" >}} + +--- + +## About the Policy + +Use the Request Correlation ID policy to add a unique identifier to each request that enters an application. With the Correlation ID policy, you can trace end-to-end transactions moving through components in a distributed system. This policy is applied by default and usually uses `x-correlation-id` as the default HTTP header name. However, you can also provide a custom header value if needed. + +### Intended Audience + +{{< include "acm/how-to/policies/infra-admin-persona.md">}} + +--- + +## Workflow for Applying Policy + +To apply the policy or make changes to it, here's what you need to do: + +- [Edit an existing environment or create a new one]({{< relref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#add-environment" >}}). +- Check the advanced settings for the environment to see if the policy has been applied. +- Edit the policy to make changes for each environment. Save and publish the changes. 
+ +--- + +## Policy Settings + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values | Description | Required | Default Value | +|------------------|--------|--------------------------------|--------------------------------------------------------------|----------|--------------------| +| `httpHeaderName` | string | Example:
    `x-correlation-id` | The HTTP header name to use when passing the correlation ID. | YES | `x-correlation-id` | + +{{< /bootstrap-table >}} + + +--- + +## Applying the Policy + +You can apply this policy using either the web interface or the REST API. The policy uses `x-correlation-id` as the default HTTP header name, or you can provide a custom header value. + +
    + +{{}} + +{{%tab name="API"%}} + +To create a Request Correlation ID policy using the REST API, send an HTTP `POST` request to the Environment endpoint. + + +{{}} + +| Method | Endpoint | +|--------|---------------------------------------------------------------------| +| `POST` | `/infrastructure/workspaces/{workspace}/environments/{environment}` | + +{{}} + + +
    +JSON request + +```json +{ + "policies": { + "request-correlation-id": [ + { + "action": { + "httpHeaderName": "x-correlation-id" + } + } + ] + } +} +``` + +This JSON example defines a Request Correlation ID policy, which specifies that an HTTP header called `x-correlation-id` should be used when passing the correlation ID. + +
    + +{{%/tab%}} + +{{%tab name="UI"%}} + +To add a Request Correlation ID policy using the web interface: + +1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. +2. On the left menu, select **Infrastructure**. +3. Choose the workspace that includes the environment for the cluster you want to add the policy to. +4. Select the environment for your cluster. +5. In the list of clusters, locate the cluster you want to add the policy to. On the **Actions** menu (represented by an ellipsis, `...`), select **Edit Advanced Config**. +6. On the left menu, select **Global Policies**. +7. From the list of policies, locate the **Request Correlation ID** policy, then select **Add Policy** from the **Actions** menu (represented by an ellipsis, `...`). +8. On the **Request Correlation ID** form, complete the necessary fields: + + - **HTTP Header Name**: The HTTP header name to use when passing the correlation ID. The default is `x-correlation-id`. + +9. Select **Add** to apply the policy to the cluster. +10. Select **Save and Submit** to deploy the configuration. + +{{%/tab%}} + +{{
    }} diff --git a/content/nms/acm/how-to/policies/request-header-specification.md b/content/nms/acm/how-to/policies/request-header-specification.md new file mode 100644 index 000000000..690b9f80e --- /dev/null +++ b/content/nms/acm/how-to/policies/request-header-specification.md @@ -0,0 +1,118 @@ +--- +description: Learn how to set up the Request Header Specification policy in API Connectivity + Manager to process headers with invalid characters. +docs: DOCS-1263 +doctypes: +- API Connectivity Manager +- api management +- task +tags: +- docs +title: Request Header Specification +toc: true +weight: 1300 +--- + +## Overview + +{{< include "acm/how-to/policies-intro" >}} + +--- + +## About the Policy + +Use the Request Header Specification policy to allow headers that would normally be considered invalid. This can be used to treat underscores as valid or allow all special header characters. + +### Intended Audience + +{{< include "acm/how-to/policies/infra-admin-persona.md">}} + +--- + +## Workflow for Applying Policy + +To apply the policy or make changes to it, here's what you need to do: + +- [Edit an existing environment or create a new one]({{< relref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#add-environment" >}}). +- Check the advanced settings for the environment to see if the policy has been applied. +- Edit the policy to make changes for each environment. Save and publish the changes. + +--- + +## Policy Settings + + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Field | Type | Possible Values | Description | Required | Default Value | +|------------------|--------|--------------------------------|--------------------------------------------------------------|----------|--------------------| +| `invalidHeadersBehaviour` | string | Example:
    `ALLOW_ALL` | This can be set to `IGNORE_ALL` (the default behavior for NGINX), `ALLOW_UNDERSCORE`, or `ALLOW_ALL` | YES | `ALLOW_ALL` | + +{{< /bootstrap-table >}} + + +--- + +## Applying the Policy + +You can apply this policy using either the web interface or the REST API. Configuring the policy to `invalidHeadersBehaviour: IGNORE_ALL` will result in the same behavior as not applying the policy. + +
    + +{{}} + +{{%tab name="API"%}} + +To create a Request Header Specification policy using the REST API, send an HTTP `POST` request to the Environment endpoint. + + +{{}} + +| Method | Endpoint | +|--------|---------------------------------------------------------------------| +| `POST` | `/infrastructure/workspaces/{workspace}/environments/{environment}` | + +{{}} + + +
    +JSON request + +```json +{ + "policies": { + "request-header-specification": [ + { + "action": { + "invalidHeadersBehaviour": "ALLOW_ALL" + } + } + ] + } +} +``` + +This JSON example defines a Request Header Specification policy. + +
    + +{{%/tab%}} + +{{%tab name="UI"%}} + +To add a Request Header Specification policy using the web interface: + +1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. +2. On the left menu, select **Infrastructure**. +3. Choose the workspace that includes the environment for the cluster you want to add the policy to. +4. Select the environment for your cluster. +5. In the list of clusters, locate the cluster you want to add the policy to. On the **Actions** menu (represented by an ellipsis, `...`), select **Edit Advanced Config**. +6. On the left menu, select **Global Policies**. +7. From the list of policies, locate the **Request Header Specification** policy, then select **Add Policy** from the **Actions** menu (represented by an ellipsis, `...`). +8. On the **Request Header Specification** form, choose which configuration is appropriate for your environment. +9. Select **Add** to apply the policy to the cluster. +10. Select **Save and Submit** to deploy the configuration. + +{{%/tab%}} + +{{
    }} diff --git a/content/nms/acm/how-to/policies/tls-policies.md b/content/nms/acm/how-to/policies/tls-policies.md new file mode 100644 index 000000000..6e6ea8b12 --- /dev/null +++ b/content/nms/acm/how-to/policies/tls-policies.md @@ -0,0 +1,393 @@ +--- +description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to secure + communications by applying TLS policies. +docs: DOCS-926 +doctypes: +- task +tags: +- docs +toc: true +weight: 1400 +title: TLS +--- + +{{< shortversions "1.1.0" "latest" "acmvers" >}} + +## Overview + +{{< include "acm/how-to/policies-intro.md" >}} + +The types of communication you can apply TLS policies to includes: + +- ingress traffic to an API or Dev Portal proxy; +- communications between an API proxy and a backend API service; and +- communications between the API Connectivity Manager management plane and the Dev Portal data plane. + +--- + +### Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- API Connectivity Manager is installed, licensed, and running. +- You have one or more Environments with [API Gateway]({{< relref "/nms/acm/getting-started/add-api-gateway" >}}) or [Dev Portal]({{< relref "/nms/acm/getting-started/add-devportal" >}}) clusters. + +### How to Access the User Interface + +{{< include "acm/how-to/access-acm-ui" >}} + +### How to Access the REST API + +{{< include "acm/how-to/access-acm-api" >}} + +--- + +## Secure Ingress Traffic + +Take the steps in this section to secure the traffic coming into your API Proxies. + +### Add TLS Listener + +{{}} + {{%tab name="UI"%}} + +1. In the API Connectivity Manager user interface, go to **Workspaces > Environments > \**, where "your environment" is the Environment that contains the Developer Portal. +1. Select **Edit Advanced Config** from the **Actions** menu for the desired Developer Portal. +1. On the **Listeners** tab, select **Add Listener**. +1. 
Provide the desired **Protocol** and **Port** (for example, `443`) and select the **TLS** checkbox. + + {{%/tab%}} + {{%tab name="API"%}} + +```json +"listeners": [ + { + "transportProtocol": "HTTP", + "port": 443, + "tlsEnabled": true, + "ipv6": false + } +``` + + {{%/tab%}} +{{}} + +### Add TLS Inbound Policy {#add-tls-inbound} + +{{}} +{{%tab name="UI"%}} + +1. Select the **Global Policies** tab. +1. Select **Add Policy** from the **Actions** menu for the **TLS Inbound** policy. +1. On the **TLS Inbound** policy page, provide the requested information. + + - **Protocols:** The TLS and SSL protocols that will be used for securing communication. + - **Cipher:** The set of algorithms or a set of instructions/steps that helps to establish the secure connection. + - **Session Timeout:** Specifies the time during which a client may reuse the session parameters. + - **Session Cache:** Sets whether a session can be re-used. When off, a full negotiation is performed for every connection. + - **Session Type:** Determines the cache type for re-using sessions. + - **Session Size:** Sets the shared cache size. + +1. Upload a Server Certificate, Certificate Key, and CA Certificate. + + - Select the upload icon in the **Server Certificate** field and browse for the desired certificate on your file system. + - Select the upload icon in the **Certificate Key** field and browse for the desired key file on your file system. + - Select the upload icon in the **CA Certificates** field and browse for the desired Root CA certificate on your file system. + +1. (Optional) Select the **Verify Client Certificate** toggle and complete the configurations as appropriate. +1. Select **Add** to save and add the policy. +1. Select **Save and Submit**. 
+ +{{%/tab%}} +{{%tab name="API"%}} + +```json +"policies": { + "tls-inbound": [ + { + "data": { + "serverCerts": [ + { + "key": {{tlsKey}}, + "cert": {{tlsCert}} + } + ], + "trustedRootCACerts":{{caCert}} + } + } + ] +} +``` + +{{%/tab%}} +{{}} + +### Verify HTTPS Connection + +Once the Environment configuration has been submitted and applied, the **Job Status** for the Environment will change to **Success**. +You can then navigate to the Dev Portal user interface to verify that your connection is secured using HTTPS. + +--- + +## Secure Communications between API Proxy and Backend Service + +Take the steps in this section to secure the communications between your Proxies and the associated API backend services. When mTLS is enabled, the API Gateway identifies itself to the backend service using an SSL client certificate. + +### Add TLS Backend Policy {#add-tls-backend} + +{{}} +{{%tab name="UI"%}} + +1. In the API Connectivity Manager user interface, go to **Workspaces > Environments > \**, where "your environment" is the Environment that contains the API Gateway to be updated. +1. Select **Edit Advanced Config** from the **Actions** menu for the desired API Gateway. +1. Select the **Global Policies** tab, then select **Add Policy** from the **Actions** menu for the **TLS Backend** policy. +1. On the **TLS Backend** policy page, provide the requested information. + + - **Protocols:** The TLS and SSL protocols that will be used for securing communication to the proxied server. + - **Cipher:** The set of algorithms or a set of instructions/steps that helps to establish the secure connection to the proxied server. + - **Verify Certificate Chain Depth:** Sets the verification depth in the client certificates chain. + +1. Upload a Client Certificate, Certificate Key, and CA Certificate. + + - Select the upload icon in the **Client Certificate** field and browse for the desired certificate on your file system. 
+ - Select the upload icon in the **Certificate Key** field and browse for the desired key file on your file system. + - (Optional) Provide the Client ID and select the upload icon to upload a Trusted Root CA, then browse for the desired Root CA certificate on your file system. + +1. Select **Add** to save and add the policy. +1. Select **Save and Submit**. + +{{%/tab%}} +{{%tab name="API"%}} + +```json +"policies": { + "tls-backend": [ + { "action": { + "cipher": "HIGH:!aNULL:!MD5", + "protocols": [ + "TLSv1.2" + ] + }, + "data": { + "trustedRootCACerts":"{{caCert}}", + "clientCerts": [ + { + "cert": "{{clientCert}}", + "key": "{{clientKey}}" + + } + ] + } + } + ] +} +``` + +{{%/tab%}} +{{}} + +Once the Environment configuration has been submitted and applied, the **Job Status** for the Environment will change to **Success**. + +--- + +## Secure Communications Between API Connectivity Manager and Dev Portal Hosts + +Take the steps in this section to secure communications between the API Connectivity Manager management plane and Dev Portal hosts. + +### Add TLS Policies to External Developer Portal {#tls-external-cluster} + +{{}} +{{%tab name="UI"%}} + +1. In the API Connectivity Manager user interface, go to **Workspaces > Environments > \**, where "your environment" is the Environment that contains the Developer Portal. +1. Add the [TLS Inbound](#add-tls-inbound) and [TLS Backend](#add-tls-backend) policies to your Developer Portal. +1. Save and submit your changes. 
+ +{{%/tab%}} +{{%tab name="API"%}} + +```json +{ + "name": "{{environmentName}}", + "functions": [ + "DEVPORTAL" + ], + "proxies": [ + { + "proxyClusterName": "{{portalInstanceGName}}", + "hostnames": [ + "{{portalEnvironmentHostname}}" + ], + "runtime": "PORTAL-PROXY", + "policies": { + "tls-inbound": [ + { + "action": { + "cipher": "ECDH+AESGCM:ECDH+AES256:ECDH+AES128:DH+3DES:!ADH:!AECDH:!MD5", + "protocols": [ + "TLSv1.2" + ], + "sessionCache": { + "enable": "on", + "size": "10M", + "type": "shared" + }, + "sessionTimeout": "5m" + }, + "data": { + "trustedRootCACerts": { + "clientId": "clientId1", + "cert": "{{}}" + }, + "serverCerts": [ + { + "key": {{tlsServerKey}}, + "cert": {{tlsServerCert}} + } + ] + } + } + ], + "tls-backend": [ + { + "action": { + "cipher": "HIGH:!aNULL:!MD5", + "sessionReuse": false, + "proxyServerName": false, + "protocols": [ + "TLSv1.2" + ] + }, + "data": { + "trustedRootCACerts":"{{caCert}}", + "clientCerts": [ + { + "key": {{tlsClientKey}}, + "cert": {{tlsClientCert}} + } + ] + } + } + ] + } + } + ] +} +``` + +{{%/tab%}} +{{}} + +### Secure Communication between Portal and API Connectivity Manager using TLS Policies {#tls-internal-cluster} + +{{}} +{{%tab name="UI"%}} + +1. Select **Edit Portal <-> API Connectivity Manager Connectivity** from the **Actions** menu for your desired developer portal. +1. [Add TLS Listener(s)](#add-a-tls-listener). +1. Add the [TLS Inbound](#add-tls-inbound) policy. + + - Complete the fields as desired. + - Upload the Server Certificate and Certificate Key. + - On the same **TLS Inbound** policy page, select the **Verify Client Certificate** option. + - Provide the Certificate Authority (CA) certificates and a Client ID. + - Select **Add**. + +1. Add the [TLS Backend](#add-tls-backend) policy. + + - Complete the fields as desired. + - Upload the Client Certificate and Certificate Key. + - Select **Add**. + +1. Save and submit your changes. 
+ +{{%/tab%}} +{{%tab name="API"%}} + +```json +{ + "name": "{{environmentName}}", + "functions": [ + "DEVPORTAL" + ], + "proxies": [ + { + "proxyClusterName": "{{portalInstanceGName}}", + "hostnames": [ + "acm.{{portalEnvironmentHostname}}" + ], + "runtime": "PORTAL-PROXY", + "listeners": [ + { + "transportProtocol": "HTTP", + "port": 443, + "tlsEnabled": true, + "ipv6": false + } + ], + "policies": { + "tls-inbound": [ + { + "action": { + "cipher": "ECDH+AESGCM:ECDH+AES256:ECDH+AES128:DH+3DES:!ADH:!AECDH:!MD5", + "protocols": [ + "TLSv1.2" + ], + "sessionCache": { + "enable": "on", + "size": "10M", + "type": "shared" + }, + "sessionTimeout": "5m", + "enableMTLS": { + "certVerify": true, + "certChainVerifyDepth": 2 + } + }, + "data": { + "serverCerts": [ + { + "key": {{tlsServerKey}}, + "cert": {{tlsServerCert}} + } + ], + "clientCerts": [ + { + "clientId": "client-1", + "cert": {{caCert}}, + } + ] + } + } + ], + "tls-backend": [ + { + "action": { + "cipher": "HIGH:!aNULL:!MD5", + "sessionReuse": false, + "proxyServerName": false, + "protocols": [ + "TLSv1.2" + ] + }, + "data": { + "clientCerts": [ + { + "key": {{tlsClientKey}}, + "cert": {{tlsClientCert}} + } + ] + } + } + ] + } + } + ] +} +``` + +{{%/tab%}} +{{}} + +Once the Environment configuration has been submitted and applied, the **Job Status** for the Environment will change to **Success**. 
+ diff --git a/content/nms/acm/how-to/services/_index.md b/content/nms/acm/how-to/services/_index.md new file mode 100644 index 000000000..acaac100f --- /dev/null +++ b/content/nms/acm/how-to/services/_index.md @@ -0,0 +1,5 @@ +--- +title: Services +weight: 200 +url: /nginx-management-suite/acm/how-to/services/ +--- \ No newline at end of file diff --git a/content/nms/acm/how-to/services/publish-api.md b/content/nms/acm/how-to/services/publish-api.md new file mode 100644 index 000000000..60de771cd --- /dev/null +++ b/content/nms/acm/how-to/services/publish-api.md @@ -0,0 +1,127 @@ +--- +description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to publish + APIs to your API Gateway. +docs: DOCS-927 +doctypes: +- task +tags: +- docs +title: Publish an HTTP API +toc: true +weight: 100 +--- + +{{< shortversions "1.1.0" "latest" "acmvers" >}} + +## Overview + +API Connectivity Manager lets you manage your API infrastructure by using a set of hierarchical resources. The top-level resource, called a **Workspace**, provides a logical grouping for resources called **Environments**. Environments contain **Clusters** that assign NGINX instances for use as API Gateways and Developer Portals. + +This topic describes how to publish an API config to a cluster. + +### Before You Begin + +Complete the following prerequisites before proceeding with this guide: + +- API Connectivity Manager is installed, licensed, and running. +- You have one or more Environments with [API Gateway]({{< relref "/nms/acm/getting-started/add-api-gateway" >}}) or [Dev Portal]({{< relref "/nms/acm/getting-started/add-devportal" >}}) clusters. + +### How to Access the User Interface + +{{< include "acm/how-to/access-acm-ui" >}} + +## Create a Service Workspace + +{{}} +The API Connectivity Manager admin must verify that the user (API Owner) has CRUD [RBAC](https://en.wikipedia.org/wiki/Role-based_access_control) permissions for the services feature. +{{}} + +Service Workspaces let you group API configurations. 
Publishing an API requires at least one Service Workspace. + +To create a Service Workspace you need to do the following: + +1. On the sidebar, select **Services**. If this is your first time on the page, you'll see a prompt for creating a new workspace. If not, select the **+Add** button in the top-right corner. +2. Enter a name, description, and any other information you want to provide. +3. Select **Create**. + +## Next Steps + +After creating a Service Workspace, two options are displayed: + +**Publish API Proxy** and **Publish API Doc** + +## Publish an API Proxy {#publish-api-proxy} + +1. Enter the required information in the fields provided. +2. **Service Target Hostname** should point to the backend service you want this API to resolve to. +3. If you choose not to use an OpenAPI spec then you need to add some extra information. +4. For **Gateway Proxy Hostname**, select the hostname of the environment you want to associate with the API. +5. **Base Path** and **Version** build up the URI, for example **/api/v1/**. +6. Select **Publish** to save and publish your API Proxy. + +{{}} +If you choose to use an OpenAPI spec, it will get processed into a config and published. +{{}} + +## Advanced Configurations {#advanced-configurations} + +After publishing the API Proxy, a link to **Edit Advanced Configurations** is displayed. +If you want to create more advanced routing configurations, select this option. +You can upload an OpenAPI spec here too, which has all the necessary API and routing information. + +{{< include "acm/openapi-support" >}} + +To add an Advanced Routing configuration, select the **Ingress** menu item in the advanced section of the menu. + +1. Select **Add Route** in the **Advanced Routes** section. +2. Fill out the required information in the form presented to you. +3. **Match URI** is the value you want to match on for queries. +4. Choose the required **HTTP Method** you want to use for this route match. +5. 
Change the **Target Backend Service Label** if required to target a specific backend based on the label value. +6. Select **Add Parameter** to add a parameter to the Path, Query, or Header that's used to match on the route. + {{}}If you choose a path parameter then you must have a placeholder for that parameter in **Match URI**.{{}} +7. Select **Add** to finish adding the route. +8. Select **Next** to move to the **Backend** configuration page. + +### Backends + +Backends tell your API where to resolve the queries to, for example your backend server. + +You can add, edit, or delete Backends. + +You can also set [DNS](https://en.wikipedia.org/wiki/Domain_Name_System) resolvers and [TLS](https://en.wikipedia.org/wiki/Transport_Layer_Security) on the backend. + +### Policies + +This section ensures you can set policies at the individual API level. + +Check the [Manage Policies]({{< relref "/nms/acm/how-to/policies/manage-policies.md" >}}) documentation for more information. + +## Publish an API Proxy using an OpenAPI Spec + +In the [**Publish an API Proxy**](#publish-api-proxy) form, select the option to use an OpenAPI spec and choose one from the list of existing specs. + +You may also upload a new OpenAPI spec in this form by selecting **+Add API Spec** and uploading the new spec in the file input. + +{{< include "acm/openapi-support" >}} + +## Update a Published API Proxy + +1. On the sidebar, select **Services**. Then on the Services Workspaces page, select the workspace containing the API proxy you want to edit. +2. Select **Edit Proxy** from the **Actions** menu of the Proxy you want to delete. +3. Edit as needed. +4. Select **Save and Publish**. + +{{}} +Certain sections can't be modified for API Proxies created with OpenAPI Specs, for example, **Advanced Routing** in the **Ingress** step. +{{}} + +## Delete a Published API Proxy + +1. On the sidebar, select **Services**. 
Then on the Services Workspaces page, select the name of the workspace containing the API proxy you want to delete. +2. Select **Delete Proxy** from the **Actions** menu of the Proxy you want to delete. + +## What's Next + +- [Manage Policies]({{< relref "/nms/acm/how-to/policies/manage-policies.md" >}}) +- [Publish a Developer Portal]({{< relref "/nms/acm/getting-started/add-devportal.md" >}}) diff --git a/content/nms/acm/how-to/services/publish-gRPC-proxy.md b/content/nms/acm/how-to/services/publish-gRPC-proxy.md new file mode 100644 index 000000000..d19d4a933 --- /dev/null +++ b/content/nms/acm/how-to/services/publish-gRPC-proxy.md @@ -0,0 +1,413 @@ +--- +description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to publish + a gRPC Proxy and manage traffic to gRPC services. +docs: DOCS-997 +doctypes: +- task +tags: +- docs +title: Publish a gRPC API Proxy +toc: true +weight: 300 +--- + +{{< shortversions "1.2.0" "latest" "acmvers" >}} + +## Overview + +gRPC has emerged as an alternative approach to building distributed applications, particularly microservice applications. API Connectivity Manager supports publishing gRPC services. +The following document describes how to publish a gRPC API proxy using the API Connectivity Manager API or UI. Additionally, this guide outlines the process of setting up a gRPC Echo Server to validate the functionality of the published proxy. + + +## Publish a gRPC API Proxy with Package-level Routing + +{{}} +{{%tab name="API"%}} + +Send a POST request to publish the gRPC API proxy. + +{{}} + +| Method | Endpoint | +|----------|---------------------------------------------------------| +| `POST` | `/services/workspaces//proxies` | + +{{}} + +```json +{ + "name": "dev-grpc-hello", + "version": "v1", + "proxyConfig": { + "hostname": "example.com", + "grpcIngress": { + "service": "helloworld." 
+ }, + "backends": [ + { + "serviceName": "hello-grpc-service-name", + "serviceTargets": [ + { + "hostname": "grpc-backend.example.com", + "listener": { + "enableTLS": false, + "port": 50051, + "transportProtocol": "GRPC" + } + } + ] + } + ] + } +} +``` + + + + +{{%/tab%}} +{{%tab name="UI"%}} + +1. Open a service workspace. +1. Select **Publish to proxy**. +1. Type a **Backend Service** name. +1. Enter a **Service Target Hostname**. +1. Select GRPC in the **Service Target Transport Protocol** menu. +1. Enter the **Service Target Port**. +1. Enter an **API Proxy** name. +1. Select a **Gateway Proxy Hostname** in the menu. +1. Enter the **Service name** and **Version**; for this example, we use "helloworld" and "v1". +1. Select **Publish**. + +You should now have a published gRPC API proxy with a Lifecycle Status of success. + +{{%/tab%}} +{{}} + +## Publish a gRPC API Proxy with Service-Level Routing + +{{}} +{{%tab name="API"%}} + +Send a POST request to publish the gRPC proxy. + + +{{}} + +| Method | Endpoint | +|----------|---------------------------------------------------------| +| `POST` | `/services/workspaces//proxies` | + +{{}} + + +```json +{ + "name": "dev-grpc-hello", + "version": "v1", + "proxyConfig": { + "hostname": "example.com", + "grpcIngress": { + "service": "helloworld.Greeter" + }, + "backends": [ + { + "serviceName": "hello-grpc-service-name", + "serviceTargets": [ + { + "hostname": "grpc-backend.example.com", + "listener": { + "enableTLS": false, + "port": 50051, + "transportProtocol": "GRPC" + } + } + ] + } + ] + } +} +``` + + + + +{{%/tab%}} +{{%tab name="UI"%}} + +To configure the proxy to route by service: + +1. Open the proxy and select **Ingress**. +1. Type "helloWorld.Greeter" in the **Service Name** field. +1. Select **Save and Publish**. + +{{%/tab%}} +{{}} + + +## Publish a gRPC API Proxy with Advanced Routes with a gRPC Method + +{{}} +{{%tab name="API"%}} + +Send a POST request to publish the gRPC proxy. 
+ + +{{}} + +| Method | Endpoint | +|----------|---------------------------------------------------------| +| `POST` | `/services/workspaces//proxies` | + +{{}} + + +```json +{ + "name": "dev-grpc-hello", + "version": "v1", + "proxyConfig": { + "hostname": "example.com", + "grpcIngress": { + "service": "helloworld.Greeter", + "routes": [ + { + "method": "SayGoodbye", + "targetBackendServiceLabel": "default" + }, + { + "method": "SayHello", + "targetBackendServiceLabel": "default" + } + ] + }, + "backends": [ + { + "serviceName": "hello-grpc-service-name", + "serviceTargets": [ + { + "hostname": "grpc-backend.example.com", + "listener": { + "enableTLS": false, + "port": 50051, + "transportProtocol": "GRPC" + } + } + ] + } + ] + } +} +``` + + + + +{{%/tab%}} +{{%tab name="UI"%}} + + +To configure the proxy with an advanced route + +1. Open the proxy and select the **Ingress**. +1. Select **Add route** and enter the **GRPC Method**; for example, "SayGoodbye". +1. Select **Save and Publish**. +1. Proceed to [Set Up gRPC Echo Server and Test gRPC API Proxy](#setup-grpc-echo-server-optional) for the next steps. + +{{%/tab%}} +{{}} + + +## Service-Level Routing using Labels +{{}} +{{%tab name="API"%}} + +Send a POST request to publish the gRPC proxy. 
+ + +{{}} + +| Method | Endpoint | +|----------|---------------------------------------------------------| +| `POST` | `/services/workspaces//proxies` | + +{{}} + + +```json +{ + "name": "dev-grpc-hello", + "version": "v1", + "proxyConfig": { + "hostname": "example.com", + "grpcIngress": { + "service": "helloworld.Greeter", + "routes": [ + { + "method": "SayGoodbye", + "targetBackendServiceLabel": "custom" + }, + { + "method": "SayHello", + "targetBackendServiceLabel": "default" + } + ] + }, + "backends": [ + {"label": { + "targetName": "default" + }, + "serviceName": "hello-grpc-service-name", + "serviceTargets": [ + { + "hostname": "grpc-backend.example.com.1", + "listener": { + "enableTLS": false, + "port": 50051, + "transportProtocol": "GRPC" + } + } + ] + }, + { + "label": { + "targetName": "custom" + }, + "serviceName": "hello-grpc-service-name", + "serviceTargets": [ + { + "hostname": "grpc-backend.example.com.2", + "listener": { + "enableTLS": false, + "port": 50051, + "transportProtocol": "GRPC" + } + } + ] + } + ] + } +} +``` + + + + + +{{%/tab%}} +{{%tab name="UI"%}} + +If you have multiple backend servers and want to route to a specific backend server, you can use labels. + +1. Open the proxy and select **Backend**. +1. Enter a **Service Name** and **Service Version**. +1. Add a label for the backend service, "custom2". +1. Type the **Service Target Hostname**. +1. Select **Add**. +1. Select **Save and Publish**. +1. [Setup gRPC Echo Server and Test gRPC API Proxy](#setup-grpc-echo-server-optional). + +{{%/tab%}} +{{}} + +## Backends + +Backends specify where your API should send queries, such as to your backend server. + +You can add, edit, or delete backends. + +You can also set [DNS](https://en.wikipedia.org/wiki/Domain_Name_System) resolvers and [TLS](https://en.wikipedia.org/wiki/Transport_Layer_Security) on the backend. + +## Policies + +This section ensures you can set policies at the individual API level. 
+ +For more information, refer to the [Manage Policies]({{< relref "/nms/acm/how-to/policies/manage-policies.md" >}}) documentation. + +## Update a Published API Proxy + +1. On the sidebar, select **Services**. Then on the Services Workspaces page, select the workspace containing the API proxy you want to edit. +2. Select **Edit Proxy** from the **Actions** menu of the Proxy you want to delete. +3. Edit as needed. +4. Select **Save and Publish**. + +## Delete a Published API Proxy + +1. On the sidebar, select **Services**. Then on the Services Workspaces page, select the name of the workspace containing the API proxy you want to delete. +2. Select **Delete Proxy** from the **Actions** menu of the Proxy you want to delete. + +## Set Up gRPC Echo Server (Optional) {#setup-grpc-echo-server-optional} + +This section explains how to set up a gRPC echo server to verify that the gRPC API works as expected. + +From a command line terminal: + +1. Create a virtual environment and install the required packages: + + ```shell + virtualenv echo-servers + source echo-servers/bin/activate + pip install grpcio protobuf grpcio-tools + ``` + +1. Create a file named `helloworld.proto` and add the following content: + + ```shell + syntax = "proto3"; + + package helloworld; + + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + rpc SayGoodbye (GoodbyeRequest) returns (GoodbyeReply) {} + } + + message HelloRequest { + string name = 1; + } + + message HelloReply { + string message = 1; + } + + message GoodbyeRequest { + string name = 1; + } + + message GoodbyeReply { + string message = 1; + } + ``` + +1. Run the following command to generate the python code: `python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. helloworld.proto` +1. 
Create `server.py` Add the following to the file: + + ```shell + import grpc + import helloworld_pb2 + import helloworld_pb2_grpc + from concurrent import futures + + class GreeterServicer(helloworld_pb2_grpc.GreeterServicer): + def SayHello(self, request, context): + response = helloworld_pb2.HelloReply(message='Hello, ' + request.name) + return response + + def SayGoodbye(self, request, context): + response = helloworld_pb2.GoodbyeReply(message='Goodbye, ' + request.name) + return response + + def serve(): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + helloworld_pb2_grpc.add_GreeterServicer_to_server(GreeterServicer(), server) + server.add_insecure_port('[::]:50051') + server.start() + server.wait_for_termination() + + if __name__ == '__main__': + serve() + ``` + +1. Run `python server.py`. +1. To confirm the server is running, run the command `netstat -tulpn | grep 50051`. +1. For step-by-step instructions on how to set up gRPC testing using Postman, refer to [Testing gRPC APIs with Postman](https://blog.postman.com/testing-grpc-apis-with-postman/). This guide will help you test and validate your gRPC APIs effectively using Postman. diff --git a/content/nms/acm/releases/_index.md b/content/nms/acm/releases/_index.md new file mode 100644 index 000000000..2cf3243ea --- /dev/null +++ b/content/nms/acm/releases/_index.md @@ -0,0 +1,6 @@ +--- +title: Releases +description: "Stay up-to-date with the latest F5 NGINX Management Suite API Connectivity Manager releases." +weight: 800 +url: /nginx-management-suite/acm/releases/ +--- \ No newline at end of file diff --git a/content/nms/acm/releases/known-issues.md b/content/nms/acm/releases/known-issues.md new file mode 100644 index 000000000..827e86397 --- /dev/null +++ b/content/nms/acm/releases/known-issues.md @@ -0,0 +1,850 @@ +--- +description: This document is a summary of the known issues in F5 NGINX Management Suite + API Connectivity Manager. Fixed issues are removed after **45 days**.

    We recommend + upgrading to the latest version of API Connectivity Manager to take advantage of + new features, improvements, and bug fixes.

    +docs: DOCS-930 +doctypes: +- reference +tags: +- docs +title: Known Issues +toc: true +weight: 200 +--- + +{{}} + + +--- + +## 1.9.2 +March 14, 2024 + +### {{% icon-bug %}} Helm chart backup and restore is broken {#44766} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44766 | Open | + +{{}} +#### Description +Helm backup and restore will not run in ACM-1.9.1 on NMS-2.15.x due to an underlying change in the dqlite client. + +#### Workaround + +None + +--- + +## 1.9.1 +October 05, 2023 + +### {{% icon-resolved %}} JWT tokens are overwritten when multiple proxies are assigned to one gateway {#44636} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44636 | Fixed in API Connectivity Manager 1.9.2 | + +{{}} +#### Description +When multiple API proxies, each with its own JSON Web Token Assertion policy, are assigned to one gateway, the directives are overwritten by one another. + +#### Workaround + +None + +--- + +## 1.9.0 +September 07, 2023 + +### {{% icon-resolved %}} Module crashes when an OpenAPI spec is uploaded with a global security requirement that contains an empty security requirement object {#44393} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 44393 | Fixed in API Connectivity Manager 1.9.1 | + +{{}} +#### Description +API Connectivity Manager crashes when an OpenAPI specification file is uploaded with a global security requirement block containing an empty `security` object. 
+
+Example OpenAPI security requirement with empty security object:
+
+```none
+"security": [{}]
+```
+
+---
+
+## 1.8.0
+July 27, 2023
+
+### {{% icon-resolved %}} Cannot use TLS enabled backend with HTTP backend-config policy {#44212}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 44212 | Fixed in API Connectivity Manager 1.9.0 |
+
+{{}}
+#### Description
+When configuring a backend-config policy with the transport protocol set to HTTP for an API, if TLS is enabled on that API's backend, then the configuration will fail with the following error in the API Connectivity Manager log file:
+ "Backend Config policy failed when checking transport protocol match because of: the backend-config policy transport protocol http does not match the proxy backend transport protocol https"
+
+---
+
+### {{% icon-resolved %}} Deployment fails due to duplicate locations {#43673}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 43673 | Fixed in API Connectivity Manager 1.9.0 |
+
+{{}}
+#### Description
+When more than one version of an API is published and Append Rule is set to "None", the deployment fails due to duplicate locations.
+
+---
+
+### {{% icon-resolved %}} Certificates associated with empty instance groups can be deleted, resulting in a broken reference in the API Connectivity Manager module {#43671}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 43671 | Fixed in API Connectivity Manager 1.9.0 |
+
+{{}}
+#### Description
+In the Instance Manager **Certificates and Keys** web interface, you can delete API Connectivity Manager TLS Policy certificates associated with empty instance groups. However, this action may lead to a broken reference problem in the API Connectivity Manager module, resulting in the inability to modify or delete the broken Environment from the web interface.
+
+#### Workaround
+
+You can delete the Environment using the API if it cannot be modified or deleted using the web interface. 
+ +--- + +## 1.7.0 +June 21, 2023 + +### {{% icon-resolved %}} Environments with WAF enabled may transition to a Failed status when a Developer Portal cluster is added. {#43231} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 43231 | Fixed in API Connectivity Manager 1.8.0 | + +{{}} +#### Description +If you add a Developer Portal cluster to an environment that has WAF enabled, the environment may transition to a `Failed` status. If this happens, follow the steps in the workaround. + +#### Workaround + +On the Developer Portal: + +1. Open an SSH connection to the Developer Portal and log in. +2. [Install F5 NGINX App Protect]({{< relref "/nap-waf/v4/admin-guide/install.md" >}}). +3. Stop the NGINX Agent: + + ```bash + sudo systemctl stop nginx-agent + ``` + +4. Run the onboarding command to add the Developer Cluster: + + ```bash + curl -k https:///install/nginx-agent > install.sh && sudo sh install.sh -g -m precompiled-publication --nap-monitoring true && sudo systemctl start nginx-agent + ``` + + Replace `` with the fully qualified domain name of your NGINX Management Suite, and `` with the name of the Developer Cluster. + + +5. Confirm the NGINX Agent is started and restart if necessary: + + ```bash + sudo systemctl status nginx-agent + sudo systemctl start nginx-agent + ``` + +--- + +### {{% icon-resolved %}} Resources deployed to a Developer Portal which has had its database reset cannot be updated or removed {#43140} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 43140 | Fixed in API Connectivity Manager 1.9.0 | + +{{}} +#### Description +It is not possible to remove resources from API Connectivity Manager which have been published to a Developer Portal if the Developer Portal database is cleared. 
+ +--- + +## 1.6.0 +May 11, 2023 + +### {{% icon-resolved %}} Multiple entries selected when gateway proxy hostnames are the same {#42515} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 42515 | Fixed in API Connectivity Manager 1.7.0 | + +{{}} +#### Description +Multiple entries are selected when gateway proxy hostnames are the same. + +#### Workaround + +There is no impact to functionality. + +--- + +### {{% icon-resolved %}} The routes filter under the proxy metrics page won’t work with params {#42471} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 42471 | Fixed in API Connectivity Manager 1.7.0 | + +{{}} +#### Description +The routes filter under the proxy metrics page won’t work with params currently. + +For example, `/api/v1/shops/{shopID}` + +The API won’t match on the above route. + +--- + +## 1.5.0 +March 28, 2023 + +### {{% icon-bug %}} Using policies with targetPolicyName set to anything other than the default value can cause unexpected results. {#42682} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 42682 | Open | + +{{}} +#### Description +Creating a policy with metadata of “targetPolicyName” set to anything but default can cause issues with secrets being duplicated if more than one policy is created. Setting this value to anything but the default value will also cause the policy to not be applied. The policy may be shown as applied in the UI when it is not. + +#### Workaround + +Do not modify the “targetPolicyName” to be anything but the default value. + +--- + +### {{% icon-resolved %}} Array values in token claims are treated as string values {#42388} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 42388 | Fixed in API Connectivity Manager 1.6.0 | + +{{}} +#### Description +When an Access Control Routing match rule targeted a token value that contained an array, the array was collapsed into a comma-separated string. 
However, the expected behavior is for rules targeting arrays to pass if any value within the array matches the condition, rather than requiring the entire array to match. + +--- + +### {{% icon-resolved %}} Developer Portal: When typing the links to use for the footer, the text boxes keep losing focus {#41626} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 41626 | Fixed in API Connectivity Manager 1.6.0 | + +{{}} +#### Description +The **Text to Display** and **URL** boxes on the Developer Portal's _Configure Footer_ page lose focus when text is being typed. + +#### Workaround + +You may need to click back into the boxes several times while typing to regain focus. + +--- + +### {{% icon-resolved %}} TLS setting on listener is not reset when TLS policy is removed {#41426} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 41426 | Fixed in API Connectivity Manager 1.6.0 | + +{{}} +#### Description +When a TLS policy is removed from an environment, the web interface will not automatically adjust the TLS setting on the listener. As a result, the listener will remain in the `TLS enabled` state, leading to an unsuccessful attempt to save and publish the environment. + +#### Workaround + +Toggle the TLS setting in the web interface when removing the TLS policy from an environment. + +--- + +## 1.4.0 +January 23, 2023 + +### {{% icon-resolved %}} Cluster and Environment deletion issues when Portal Docs are published {#40163} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 40163 | Fixed in API Connectivity Manager 1.4.1 | + +{{}} +#### Description +When a developer portal proxy is hosting API documentation, the infrastructure admin is, in some cases, unable to delete clusters in other unrelated Environments and, therefore, unable to delete those same Environments. 
+ +--- + +### {{% icon-resolved %}} The Proxy Cluster API isn't ready to be used {#40097} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 40097 | Fixed in API Connectivity Manager 1.5.0 | + +{{}} +#### Description +The API Connectivity Manager API documentation has inadvertently released details of Proxy Cluster endpoints and related policies before their public launch. Consequently, the following Proxy Cluster endpoints and global policies should not be used yet. + +The following Proxy Cluster endpoints are not ready for use: + +- `/infrastructure/workspaces/{workspaceName}/proxy-clusters` +- `/infrastructure/workspaces/{workspaceName}/proxy-clusters/{name}` + +The following global policies are not yet ready for use: + +- cluster-zone-sync +- cluster-wide-config + +A later version of the release notes will inform you when these endpoints and policies are ready. + +--- + +### {{% icon-resolved %}} Configurations aren't pushed to newly onboarded instances if another instance is offline {#40035} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 40035 | Fixed in API Connectivity Manager 1.5.0 | + +{{}} +#### Description +When a new instance is onboarded, it will not be configured if any other instances are offline. + +#### Workaround + +After onboarding the instance as usual, push the existing configuration again to the new instance, without making any changes. + +--- + +## 1.3.0 +December 12, 2022 + +### {{% icon-resolved %}} OIDC policy cannot be applied alongside a proxy authentication policy {#39604} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 39604 | Fixed in API Connectivity Manager 1.4.0 | + +{{}} +#### Description +It is not possible to use both an OpenID Connect (OIDC) policy and a proxy authentication policy concurrently. 
+ +--- + +### {{% icon-resolved %}} The web interface doesn't pass the `enableSNI` property for the TLS backend policy {#39445} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 39445 | Fixed in API Connectivity Manager 1.3.1 | + +{{}} +#### Description +When configuring a TLS backend policy in the web interface, the new `enableSNI` property does not match the value of the deprecated `proxyServerName` property, resulting in an API error. The `enableSNI` value must be the same as `proxyServerName` value. + +#### Workaround + +Use the NGINX Management Suite API Connectivity Manager REST API to send a PUT request to the following endpoint, providing the correct values for `enableSNI` and `proxyServerName`. Both values must match. + +{{< raw-html>}}
    {{}} +{{}} +| Method | Endpoint | +|--------|--------------------------------------------------------------------------------------| +| PUT | `/infrastructure/workspaces/{{infraWorkspaceName}}/environments/{{environmentName}}` | +{{}} +{{< raw-html>}}
    {{}}
+
+---
+
+### {{% icon-resolved %}} A JWT token present in a query parameter is not proxied to the backend for advanced routes {#39328}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 39328 | Fixed in API Connectivity Manager 1.4.0 |
+
+{{}}
+#### Description
+When using JWT authentication with advanced routes, a JWT token that is provided as a query parameter will not be proxied to the backend service.
+
+#### Workaround
+
+Pass the JWT token as a header instead of providing the JWT token as a query parameter.
+
+---
+
+## 1.2.0
+October 18, 2022
+
+### {{% icon-resolved %}} Developer Portal backend information is unintentionally updated when editing clusters within an environment {#39409}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 39409 | Fixed in API Connectivity Manager 1.3.1 |
+
+{{}}
+#### Description
+The Developer Portal backend information may be inadvertently updated in the following circumstances:
+
+1. If you have multiple Developer Portal clusters and update the backend information (for example, enable TLS or change the host or port, etc. ) for any of those clusters, the update is applied to all of the clusters.
+
+2. If you have one or more Developer Portal clusters and update any other cluster in the environment (for example, the API Gateway or Developer Portal Internal cluster), the backend settings for the Developer Clusters are reset to their defaults (127.0.0.1/8080/no TLS).
+
+#### Workaround
+
+- Workaround for scenario #1
+
+  Use the NGINX Management Suite API Connectivity Manager REST API to send a PUT request to the following endpoint with the correct backend settings for each Developer Portal cluster:
+
+  {{< raw-html>}}
    {{}} + {{}} + | Method | Endpoint | + |--------|--------------------------------------------------------------------------------------| + | PUT | `/infrastructure/workspaces/{{infraWorkspaceName}}/environments/{{environmentName}}` | + {{}} + {{< raw-html>}}
    {{}} + +- Workaround for scenario #2 + + If you have just one Developer Portal cluster, you can use the web interface to update the backend settings for the cluster if you're not using the default settings. + + If you have more than one Developer Portal cluster, use the NGINX Management Suite API Connectivity Manager REST API to send a PUT request to the following endpoint with the correct backend settings for each cluster: + + {{< raw-html>}}
    {{}} + {{}} + | Method | Endpoint | + |--------|--------------------------------------------------------------------------------------| + | PUT | `/infrastructure/workspaces/{{infraWorkspaceName}}/environments/{{environmentName}}` | + {{}} + {{< raw-html>}}
    {{}} + +--- + +### {{% icon-resolved %}} The user interface is erroneously including irrelevant information on the TLS inbound policy workflow {#38046} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 38046 | Fixed in API Connectivity Manager 1.3.0 | + +{{}} +#### Description +On the TLS inbound policy, toggling `Enable Client Verification` On/Off results in the user interface adding irrelevant information that causes the publish to fail due to validation error. + +#### Workaround + +Dismiss the policy without saving and restart the UI workflow to add the TLS inbound policy. + +--- + +### {{% icon-resolved %}} Portals secured with TLS policy require additional environment configuration prior to publishing API docs {#38028} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 38028 | Fixed in API Connectivity Manager 1.3.0 | + +{{}} +#### Description +When the `tls-backend` policy is applied on a developer portal cluster, the communication between the portal UI and portal backend service is secured. By default, when the portal cluster is created, and the backend is not explicitly specified in the payload, it defaults to HTTP. Adding the tls-backend policy does not automatically upgrade the protocol to HTTPS. If the protocol is not set to HTTPS, publishing API docs to the portal will fail. The user has to explicitly change the backend protocol to HTTPS. + +#### Workaround + +In the user interface, navigate to Workspace > Environment > Developer Portal Clusters > Edit Advanced Config. Select "edit the Backend" and toggle the Enable TLS switch to enabled. 
+ +--- + +### {{% icon-resolved %}} A proxy deployed with a `specRef` field (OAS) and `basePathVersionAppendRule` set to other than `NONE` may cause versions to appear twice in the deployed location block {#36666} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 36666 | Fixed in API Connectivity Manager 1.9.0 | + +{{}} +#### Description +If you add an API doc and reference it with the `specRef` field in the proxy object, the OAS (API doc) is used as the source of truth for the base path. If the OAS (API doc) contains the full correct base path, and you use any `basePathVersionAppendRule` value other than `NONE`, the base path will be corrupted by appending/prepending the version in the deployment (e.g. `/api/v3/v3`). + +#### Workaround + +If you are using an API doc with a proxy: + + 1. Put the entire true base path of the API in the server section of the API doc: + + ```nginx + Servers: + - url: https://(API-address)/api/v3 + ``` + + or + + ```nginx + Servers: + - url: /api/v3 + ``` + + {{< note >}}In the example above only `/api/v3` is relevant for this issue, and it should be the full base path to which the individual paths in the API document can be appended directly. {{< /note >}} + + 2. Set the value of the base path version append rule (`basePathVersionAppendRule`) in the proxy to `NONE`. + +--- + +### {{% icon-resolved %}} New users are unable to see pages even though they have been given access. {#36607} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 36607 | Fixed in API Connectivity Manager 1.3.0 | + +{{}} +#### Description +A newly created role needs a minimum of READ access on the LICENSING feature. Without this, the users will not have access to the pages even though they have been granted permission. They will see 403 errors surfacing as license errors while accessing the pages. 
+ +#### Workaround + +Assign a minimum of READ access on the LICENSING feature to all new roles + +--- + +## 1.1.0 +August 18, 2022 + +### {{% icon-resolved %}} To see updates to the Listener's table, forced refresh of the cluster details page is required. {#36540} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 36540 | Fixed in API Connectivity Manager 1.2.0 | + +{{}} +#### Description +When trying to update the Advance Config for Environment cluster, changes are not reflected on the cluster details page after saving and submitting successfully. + +#### Workaround + +Refresh or reload the browser page to see changes on the cluster details page. + +--- + +### {{% icon-resolved %}} Using labels to specify the backend is partially available {#36317} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 36317 | Fixed in API Connectivity Manager 1.2.0 | + +{{}} +#### Description +The `targetBackendServiceLabel` label is not editable through the web interface. `targetBackendServiceLabel` is not configurable at the URI level in the spec. + +#### Workaround + +`targetBackendServiceLabel` label can be updated by sending a PUT command to the API. + +--- + +### {{% icon-resolved %}} Ratelimit policy cannot be applied with OAuth2 JWT Assertion policy. {#36095} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 36095 | Fixed in API Connectivity Manager 1.2.0 | + +{{}} +#### Description +Rate limit policy cannot be applied with the OAuth2 JWT assertion policy. + +--- + +### {{% icon-resolved %}} Enums are not supported in Advanced Routing. {#34854} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 34854 | Fixed in API Connectivity Manager 1.2.0 | + +{{}} +#### Description +Enums cannot be set for path or query parameters while applying advanced routing. A list of specific values cannot be specified for their advanced routing parameters. 
+ +--- + +## 1.0.0 +July 19, 2022 + +### {{% icon-resolved %}} The API Connectivity Manager module won't load if the Security Monitoring module is enabled {#39943} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 39943 | Fixed in Instance Manager 2.8.0 | + +{{}} +#### Description +If you have Instance Manager 2.7 or earlier installed and attempt to enable both the API Connectivity Manager and Security Monitoring modules on the same NGINX Management Suite management plane, the API Connectivity Manager module will not load because of incompatibility issues with the Security Monitoring module. + +#### Workaround + +Before enabling the API Connectivity Manager and Security Monitoring modules, ensure that your Instance Manager is upgraded to version 2.8 or later. Be sure to read the release notes for each module carefully, as they may contain important information about version dependencies. + +To see which version of Instance Manager you have installed, run the following command: + +- CentOS, RHEL, RPM-based: + + ```bash + yum info nms-instance-manager + ``` + +- Debian, Ubuntu, Deb-based: + + ```bash + dpkg -s nms-instance-manager + ``` + +--- + +### {{% icon-resolved %}} Credentials endpoint is disabled by default {#35630} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 35630 | Fixed in API Connectivity Manager 1.2.0 | + +{{}} +#### Description +For security reasons, the credentials endpoint on API Connectivity Manager(ACM) is disabled by default. To use the developer portal credentials workflow, configuration changes need to be made on the ACM host to enable credentials endpoints. Also, communication between ACM and the developer portal can be secured by providing certificates. + +#### Workaround + +To enable the credentials endpoints on ACM host + +1. SSH to the ACM host +1. 
Enable resource credentials endpoint
+   In {{/etc/nms/nginx/locations/nms-acm.conf}}, uncomment the location block
+
+    ```nginx
+    #Deployment of resource credentials from the devportal
+    # Uncomment this block when using devportal. Authentication is disabled
+    # for this location. This location block will mutually
+    # verify the client trying to access the credentials API.
+    # location = /api/v1/devportal/credentials {
+    # OIDC authentication (uncomment to disable)
+    # auth_jwt off;
+    # auth_basic off;
+    # error_page 401 /401_certs.json;
+    # if ($ssl_client_verify != SUCCESS) {
+    # return 401;
+    # }
+    # proxy_pass http://apim-service/api/v1/devportal/credentials;
+    #}
+    ```
+
+1. Save the changes.
+1. Reload NGINX on the ACM host: `nginx -s reload`
+
+---
+
+### {{% icon-resolved %}} Unable to delete an environment that is stuck in a Configuring state. {#35546}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 35546 | Fixed in API Connectivity Manager 1.2.0 |
+
+{{}}
+#### Description
+In the web interface, after deleting all of the proxy clusters in an environment that's in a `FAIL` state, the environment may transition to a `CONFIGURING` state and cannot be deleted.
+
+#### Workaround
+
+Add back the deleted proxy clusters using the web interface. The environment will transition to a `Fail` state. At this point, you can use the API to delete the proxy by sending a `DELETE` request to:
+
+``` text
+https:///api/acm/v1/infrastructure/workspaces//environments/
+```
+
+---
+
+### {{% icon-resolved %}} Installing NGINX Agent on Ubuntu 22.04 LTS fails with `404 Not Found` error {#35339}
+
+{{}}
+
+| Issue ID | Status |
+|----------------|--------|
+| 35339 | Fixed in API Connectivity Manager 1.3.0 |
+
+{{}}
+#### Description
+When installing the NGINX Agent on Ubuntu 22.04 LTS, the installation script fails with a `404 Not Found` error similar to the following:
+
+```text
+404 Not found [IP: ]
+Reading package lists... 
+E: The repository 'https://192.0.2.0/packages-repository/deb/ubuntu jammy Release' does not have a Release file. +E: The repository 'https://pkgs.nginx.com/app-protect/ubuntu jammy Release' does not have a Release file. +E: The repository 'https://pkgs.nginx.com/app-protect-security-updates/ubuntu jammy Release' does not have a Release file. +``` + +#### Workaround + +Edit the NGINX Agent install script to use the codename `focal` for Ubuntu 20.04. + +1. Download the installation script: + + ```bash + curl -k https:///install/nginx-agent > install.sh + ``` + +2. Open the `install.sh` file for editing. +3. Make the following changes: + + On **lines 256-258**, change the following: + + ```text + codename=$(cat /etc/*-release | grep '^DISTRIB_CODENAME' | + sed 's/^[^=]*=\([^=]*\)/\1/' | + tr '[:upper:]' '[:lower:]') + ``` + + to: + + ```text + codename=focal + ``` + +
    + + **—OR—** + + Alternatively, on **line 454**, change the following: + + ```text + deb ${PACKAGES_URL}/deb/${os}/ ${codename} agent + ``` + + to: + + ```text + deb ${PACKAGES_URL}/deb/${os}/ focal agent + ``` + +4. Save the changes. +5. Run the `install.sh` script. + +--- + +### {{% icon-bug %}} OIDC policy cannot be applied on a shared proxy cluster {#35337} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 35337 | Open | + +{{}} +#### Description +If the same proxy cluster is used for both the Developer Portal and API Gateway, the OIDC Policy is not applied. + +#### Workaround + +Within an environment, use separate proxy clusters for the Developer Portal and API Gateway when applying an OIDC policy. + +--- + +### {{% icon-resolved %}} No validation when conflicting policies are added {#34531} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 34531 | Fixed in API Connectivity Manager 1.3.0 | + +{{}} +#### Description +When securing the API Proxy with policies like basic authentication or APIKey authentication, the user is not warned if a duplicate or conflicting policy is already added. Conflicting policies are not validated. + +#### Workaround + +Secure the API proxy with only one policy. + +--- + +### {{% icon-resolved %}} CORS policy doesn't support proxying preflight requests to the backend when combined with an authentication policy {#34449} + +{{}} + +| Issue ID | Status | +|----------------|--------| +| 34449 | Fixed in API Connectivity Manager 1.6.0 | + +{{}} +#### Description +On an API Proxy with an authentication policy, applying a CORS policy with `preflightContinue=true` is not supported. + +#### Workaround + +Apply CORS policy and set `preflightContinue=false`. 
diff --git a/content/nms/acm/releases/release-notes.md b/content/nms/acm/releases/release-notes.md new file mode 100644 index 000000000..d75f78130 --- /dev/null +++ b/content/nms/acm/releases/release-notes.md @@ -0,0 +1,990 @@ +--- +description: These release notes list and describe the new features, enhancements, + and resolved issues in NGINX Management Suite API Connectivity Manager. +docs: DOCS-931 +tags: +- docs +title: Release Notes +toc: true +weight: 100 +--- + +{{}} + +--- + +## 1.9.3 + +November 06, 2024 + +### Upgrade Paths {#1-9-3-upgrade-paths} + +API Connectivity Manager supports upgrades from these previous versions: + +- 1.6.0 - 1.9.2 + +If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + +
    + +
    + Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +
    + + +### What's New{#1-9-3-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Stability and performance improvements** + + This release includes stability and performance improvements. + + +### Known Issues{#1-9-3-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nms/acm/releases/known-issues.md" >}}) topic. + +--- + +## 1.9.2 + +March 14, 2024 + +### Upgrade Paths {#1-9-2-upgrade-paths} + +API Connectivity Manager supports upgrades from these previous versions: + +- 1.6.0 - 1.9.1 + +If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +
    + +
    + Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +
    + + +### What's New{#1-9-2-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Stability and performance improvements** + + This release includes stability and performance improvements. + + +### Resolved Issues{#1-9-2-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} JWT tokens are overwritten when multiple proxies are assigned to one gateway [(44636)]({{< relref "/nms/acm/releases/known-issues.md#44636" >}}) + +### Known Issues{#1-9-2-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nms/acm/releases/known-issues.md" >}}) topic. + +--- + +## 1.9.1 + +October 05, 2023 + +### Upgrade Paths {#1-9-1-upgrade-paths} + +API Connectivity Manager supports upgrades from these previous versions: + +- 1.6.0 - 1.9.0 + +If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +
    + +
    + Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +
    + + +### What's New{#1-9-1-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Stability and performance improvements** + + This release includes stability and performance improvements. + + +### Resolved Issues{#1-9-1-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Module crashes when an OpenAPI spec is uploaded with a global security requirement that contains an empty security requirement object [(44393)]({{< relref "/nms/acm/releases/known-issues.md#44393" >}}) + +### Known Issues{#1-9-1-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nms/acm/releases/known-issues.md" >}}) topic. + +--- + +## 1.9.0 + +September 07, 2023 + +### Upgrade Paths {#1-9-0-upgrade-paths} + +API Connectivity Manager supports upgrades from these previous versions: + +- 1.6.0 - 1.8.0 + +If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +
    + +
    + Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +
    + + +### What's New{#1-9-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Server URL templating in OpenAPI specification file** + + Now you can use templating for the server URL in a supplied OpenAPI specification. You must supply the full explicit `basePath` as part of the server URL in the OpenAPI specification file. + + When creating an API proxy using an OAS file, the following values will not be editable in the web interface if they are provided via the OAS spec file: + + ```json + servers: + url: http://{server}.hostname.com/api/{version} + variables: + server: + default: customers + version: + default: v1 + basePathVersionAppendRule: + default : none + stripBasePathVersion: + default : false + ``` + +- {{% icon-feature %}} **OpenAPI specification support for OAuth2 JWT assertion policy** + + You can now specify an OAuth2 JWT assertion policy to apply to the API Proxy being created using an OpenAPI specification file. + +- {{% icon-feature %}} **Backend server configuration from OpenAPI specification file** + + You can provide the backend server configuration for upstream servers in an OpenAPI specification file using extensions specific to API Connectivity Manager. See the [Publish an API Proxy]({{< relref "/nms/acm/getting-started/publish-api-proxy.md#publish-api-proxy-with-spec" >}}) documentation. + + +### Resolved Issues{#1-9-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. 
+ +- {{% icon-resolved %}} A proxy deployed with a `specRef` field (OAS) and `basePathVersionAppendRule` set to other than `NONE` may cause versions to appear twice in the deployed location block [(36666)]({{< relref "/nms/acm/releases/known-issues.md#36666" >}}) +- {{% icon-resolved %}} Resources deployed to a Developer Portal which has had its database reset cannot be updated or removed [(43140)]({{< relref "/nms/acm/releases/known-issues.md#43140" >}}) +- {{% icon-resolved %}} Certificates associated with empty instance groups can be deleted, resulting in a broken reference in the API Connectivity Manager module [(43671)]({{< relref "/nms/acm/releases/known-issues.md#43671" >}}) +- {{% icon-resolved %}} Deployment fails due to duplicate locations [(43673)]({{< relref "/nms/acm/releases/known-issues.md#43673" >}}) +- {{% icon-resolved %}} Cannot use TLS enabled backend with HTTP backend-config policy [(44212)]({{< relref "/nms/acm/releases/known-issues.md#44212" >}}) + +### Known Issues{#1-9-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nms/acm/releases/known-issues.md" >}}) topic. + +--- + +## 1.8.0 + +July 27, 2023 + +### Upgrade Paths {#1-8-0-upgrade-paths} + +API Connectivity Manager supports upgrades from these previous versions: + +- 1.5.0 - 1.7.0 + +If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +
    + +
    + Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +
    + + +### What's New{#1-8-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Advanced security policy for proxies** + + You can use the [Advanced Security policy]({{< relref "/nms/acm/how-to/policies/advanced-security.md" >}}) to add a pre-defined NGINX App Protect configuration to your deployment. This enhancement allows you to specify the rules for each API. + +- {{% icon-feature %}} **Publish APIs using OpenAPI Specification version 3.0 or 3.1** + + Now, you can publish APIs using OpenAPI Specification version 3.0 or 3.1. + +- {{% icon-feature %}} **Added `matchRule` field to the `route` items in `proxyConfig.ingress`** + + The `matchRule` field is now available in the `route` items in `proxyConfig.ingress`. This field is optional and allows you to define a path matching rule for advanced routes. + + The OpenAPI Specification now supports the `x-acm-match-rule` extension for defining match rules for paths within routes. If you don't specify a value for this extension, it will default to `EXACT`. The only allowed values for `matchRule` are the strings `EXACT` and `PREFIX`. + + +### Changes in Default Behavior{#1-8-0-changes-in-behavior} +This release has the following changes in default behavior: + +- {{% icon-feature %}} **Proxy labels removed** + + Labels on proxies were added with future use cases in mind although without a current need. The proxy labels have been removed to avoid confusion as to their purpose. + + +### Resolved Issues{#1-8-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Environments with WAF enabled may transition to a Failed status when a Developer Portal cluster is added. [(43231)]({{< relref "/nms/acm/releases/known-issues.md#43231" >}}) + +### Known Issues{#1-8-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nms/acm/releases/known-issues.md" >}}) topic. 
+ +--- + +## 1.7.0 + +June 21, 2023 + +### Upgrade Paths {#1-7-0-upgrade-paths} + +API Connectivity Manager supports upgrades from these previous versions: + +- 1.4.0 - 1.6.0 + +If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +
    + +
    + Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +
    + + +### What's New{#1-7-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Advanced Security Policy** + + The new [Advanced Security policy]({{< relref "/nms/acm/how-to/policies/advanced-security.md" >}}) can be used to add a pre-defined NGINX App Protect configuration to your deployment. Doing so will apply the rules specified in the policy to your APIs. + +- {{% icon-feature %}} **Option added to allow API proxy to ignore invalid headers** + + The [Request Header Specification policy]({{< relref "/nms/acm/how-to/policies/request-header-specification.md" >}}) allows headers with (.) and (\_) characters to be proxied to backend services. + + By default, NGINX server will drop all headers that contain (.) and (\_) characters in the header name. Though not common, it is a legal character in headers. This feature will allow users to instruct NGINX to allow such headers to be proxied. + +- {{% icon-feature %}} **Regex support added to access control routing claims** + + Access control routing claims can be arrays. For example, roles and groups are typically represented as an array. You can now use a regular expression to match against claims embedded in arrays. + +- {{% icon-feature %}} **Ingress routing rules now allow using regular expressions** + + Regular expressions are now supported in routing rules. This will enable routing of requests that match against strings like `?wsdl`. + + +### Resolved Issues{#1-7-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. 
+ +- {{% icon-resolved %}} The routes filter under the proxy metrics page won’t work with params [(42471)]({{< relref "/nms/acm/releases/known-issues.md#42471" >}}) +- {{% icon-resolved %}} Multiple entries selected when gateway proxy hostnames are the same [(42515)]({{< relref "/nms/acm/releases/known-issues.md#42515" >}}) + +### Known Issues{#1-7-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nms/acm/releases/known-issues.md" >}}) topic. + +--- + +## 1.6.0 + +May 11, 2023 + +### Upgrade Paths {#1-6-0-upgrade-paths} + +API Connectivity Manager supports upgrades from these previous versions: + +- 1.3.0 - 1.5.0 + +If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +
    + +
    + Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +
    + + +### What's New{#1-6-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Create security policies using an OAS specification** + + With the latest update, you can now create APIKey and Basic Auth security policies using an OAS specification. This enhancement streamlines the process for creating policies, reduces errors, and improves system security. API Connectivity Manager and NGINX can be integrated into the build pipeline where you generate OpenAPI specs. + +- {{% icon-feature %}} **New buffer settings were added to the HTTP Backend Configuration Proxy policy to enhance performance** + + With the latest HTTP Backend Configuration Proxy policy update, you can now modify the size and location of buffer temporary files or turn off buffering altogether. This enhancement offers greater flexibility and control to API Connectivity Manager users, allowing them to optimize their system's performance and improve the overall end-user experience. + +- {{% icon-feature %}} **Gain deeper insights into your environments with enhanced analytics and metrics** + + With this release, you can view more information about your environments. This includes the number of clusters and runtimes, the number of APIs available, and the total amount of data transmitted in and out of each cluster. Additionally, you can view graphs displaying crucial analytics, including traffic metrics, which can help you better understand your system's performance. + + +### Resolved Issues{#1-6-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. 
+ +- {{% icon-resolved %}} CORS policy doesn't support proxying preflight requests to the backend when combined with an authentication policy [(34449)]({{< relref "/nms/acm/releases/known-issues.md#34449" >}}) +- {{% icon-resolved %}} TLS setting on listener is not reset when TLS policy is removed [(41426)]({{< relref "/nms/acm/releases/known-issues.md#41426" >}}) +- {{% icon-resolved %}} Developer Portal: When typing the links to use for the footer, the text boxes keep losing focus [(41626)]({{< relref "/nms/acm/releases/known-issues.md#41626" >}}) +- {{% icon-resolved %}} Array values in token claims are treated as string values [(42388)]({{< relref "/nms/acm/releases/known-issues.md#42388" >}}) + +### Known Issues{#1-6-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nms/acm/releases/known-issues.md" >}}) topic. + +--- + +## 1.5.0 + +March 28, 2023 + +### Upgrade Paths {#1-5-0-upgrade-paths} + +API Connectivity Manager supports upgrades from these previous versions: + +- 1.2.0 - 1.4.1 + +If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +
    + +
    + Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +
    + + +### What's New{#1-5-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Use role-based access control for enhanced security and governance** + + With new [built-in RBAC roles for API Connectivity Manager]({{< relref "/nim/admin-guide/rbac/overview-rbac.md#build-in-roles" >}}), administrators can grant or restrict user access to workspaces and features, empowering teams to manage their own workflows. + + {{}} + - [Set Up RBAC for API Owners]({{< relref "/nms/acm/tutorials/rbac-api-owners.md" >}}) + - [Set Up RBAC for Infra Admins]({{< relref "/nms/acm/tutorials/rbac-infra-admins.md" >}}) + {{}} + +- {{% icon-feature %}} **Multiple hostname support** + + Proxy clusters can be shared across multiple environments (hostnames). + +- {{% icon-feature %}} **Secure handling of sensitive data** + + API Connectivity Manager now provides enhanced security for sensitive data, including credentials used in APIKeys, Basic Auth, OAuth2, and JWT policies. All secrets are stored in a secure Vault and encrypted for added protection. + +- {{% icon-feature %}} **Runtime state sharing in an API gateway or Developer Portal** + + Administrators can use [cluster-wide policies]({{< relref "/nms/acm/how-to/policies/cluster-wide-config.md" >}}) to configure uniform settings across all instances in the cluster, such as worker connections, hash table size, and keepalive settings, to optimize performance. Furthermore, using the [Cluster Zone Sync policy]({{< relref "/nms/acm/how-to/policies/cluster-zone-sync.md" >}}), the cluster can be configured to share the runtime state and sync data across all instances, allowing for cluster-wide rate limits and sticky sessions. + +- {{% icon-feature %}} **Performance improvements for the web interface** + + A number of improvements have been made to how the web interface queries the backend services when fetching data. 
+ +- {{% icon-feature %}} **Add a Health Check policy to your gRPC proxy to ensure optimal performance** + + The [gRPC proxy can be enabled with a Health Check policy]({{< relref "/nms/acm/how-to/policies/grpc-policies.md#health-check" >}}), allowing it to check the health status of backend gRPC services and route requests accordingly. + +- {{% icon-feature %}} **Improved certificate handling** + + API Connectivity Manager will not generate new certificates if any have already been specified in the TLS policy; instead, ACM will reference the existing certificates. In this way, wildcard certificates may be employed. + + +### Security Updates{#1-5-0-security-updates} + +{{< important >}} +For the protection of our customers, NGINX doesn’t disclose security issues until an investigation has occurred and a fix is available. +{{< /important >}} + +This release includes the following security updates: + +- {{% icon-resolved %}} **Instance Manager vulnerability CVE-2023-1550** + + NGINX Agent inserts sensitive information into a log file ([CVE-2023-1550](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-1550)). An authenticated attacker with local access to read NGINX Agent log files may gain access to private keys. This issue is exposed only when the non-default trace-level logging is enabled. + + NGINX Agent is included with NGINX Instance Manager, and used in conjunction with API Connectivity Manager and the Security Monitoring module. + + This issue has been classified as [CWE-532: Insertion of Sensitive Information into Log File](https://cwe.mitre.org/data/definitions/532.html). + + - Mitigation: + + - Avoid configuring trace-level logging in the NGINX Agent configuration file. For more information, refer to the [Configuring the NGINX Agent]({{< relref "/nms/nginx-agent/install-nginx-agent.md#configuring-the-nginx-agent" >}}) section of NGINX Management Suite documentation. 
If trace-level logging is required, ensure only trusted users have access to the log files. + + - Fixed in: + + - NGINX Agent 2.23.3 + - Instance Manager 2.9.0 + + For more information, refer to the MyF5 article [K000133135](https://my.f5.com/manage/s/article/K000133135). + + +### Changes in Default Behavior{#1-5-0-changes-in-behavior} +This release has the following changes in default behavior: + +- {{% icon-feature %}} **ACL IP Policy denies IP addresses by default** + + Updates the ACL IP policy to deny IP addresses by default instead of allowing them by default. + + +### Resolved Issues{#1-5-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Configurations aren't pushed to newly onboarded instances if another instance is offline [(40035)]({{< relref "/nms/acm/releases/known-issues.md#40035" >}}) +- {{% icon-resolved %}} The Proxy Cluster API isn't ready to be used [(40097)]({{< relref "/nms/acm/releases/known-issues.md#40097" >}}) + +### Known Issues{#1-5-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nms/acm/releases/known-issues.md" >}}) topic. + +--- + +## 1.4.1 + +February 02, 2023 + +### Upgrade Paths {#1-4-1-upgrade-paths} + +API Connectivity Manager supports upgrades from these previous versions: + +- 1.1.0 - 1.4.0 + +If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +
    + +
    + Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +
    + + +### What's New{#1-4-1-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Stability and performance improvements** + + This release includes stability and performance improvements. + + +### Resolved Issues{#1-4-1-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Cluster and Environment deletion issues when Portal Docs are published [(40163)]({{< relref "/nms/acm/releases/known-issues.md#40163" >}}) + +### Known Issues{#1-4-1-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nms/acm/releases/known-issues.md" >}}) topic. + +--- + +## 1.4.0 + +January 23, 2023 + +### Upgrade Paths {#1-4-0-upgrade-paths} + +API Connectivity Manager supports upgrades from these previous versions: + +- 1.1.0 - 1.3.1 + +If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +
    + +
    + Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +
    + + +### What's New{#1-4-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Allow or deny access to APIs for specified consumers** + + Control access to APIs to prevent unauthorized requests from designated consumers. + +- {{% icon-feature %}} **OAuth2 Introspection policy now supports token claim verification** + + API admins can configure an OAuth2 Introspection policy with token claim verification. If the value of an introspected token claim matches the values in the policy configuration, the request will be allowed to proceed to the backend. If not, the request will be denied, and `403 Forbidden` will be returned. + +- {{% icon-feature %}} **Adds support for NGINX Plus R28** + + API Connectivity Manager 1.4.0 is compatible with NGINX Plus R28. For requirements related to NGINX Management Suite and API Connectivity Manager, please refer to the [Technical Specifications]({{< relref "/nim/fundamentals/tech-specs.md" >}}) guide. + + +### Resolved Issues{#1-4-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} A JWT token present in a query parameter is not proxied to the backend for advanced routes [(39328)]({{< relref "/nms/acm/releases/known-issues.md#39328" >}}) +- {{% icon-resolved %}} OIDC policy cannot be applied alongside a proxy authentication policy [(39604)]({{< relref "/nms/acm/releases/known-issues.md#39604" >}}) + +### Known Issues{#1-4-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nms/acm/releases/known-issues.md" >}}) topic. + +--- + +## 1.3.1 + +December 16, 2022 + +### Upgrade Paths {#1-3-1-upgrade-paths} + +API Connectivity Manager supports upgrades from these previous versions: + +- 1.0.0 - 1.3.0 + +If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +
    + +
    + Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +
    + + +### What's New{#1-3-1-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Stability and performance improvements** + + This release includes stability and performance improvements. + + +### Resolved Issues{#1-3-1-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Developer Portal backend information is unintentionally updated when editing clusters within an environment [(39409)]({{< relref "/nms/acm/releases/known-issues.md#39409" >}}) +- {{% icon-resolved %}} The Inbound TLS policy breaks when upgrading from API Connectivity Manager 1.2.0 to 1.3.0. [(39426)]({{< relref "/nms/acm/releases/known-issues.md#39426" >}}) +- {{% icon-resolved %}} The web interface doesn't pass the `enableSNI` property for the TLS backend policy [(39445)]({{< relref "/nms/acm/releases/known-issues.md#39445" >}}) + +### Known Issues{#1-3-1-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nms/acm/releases/known-issues.md" >}}) topic. + +--- + +## 1.3.0 + +December 12, 2022 + +### Upgrade Paths {#1-3-0-upgrade-paths} + +API Connectivity Manager supports upgrades from these previous versions: + +- 1.0.0 - 1.2.0 + +If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +
    + +
    + Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +
    + + +### What's New{#1-3-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Configure access-controlled routing** + + API lifecycle management requires routing API traffic with fine-level control, which is something that token-based authentication schemes that leverage JWT claims do well. Permissions can be encoded as custom claims in the token. Then, once the API proxy validates the token (JWT), it can access all the fields in the token as variables. Decisions can be made based on matching the claims. + + - Applying Fine-Grained Access Control + + API Owners can apply fine-grained access control and restrict access to their APIs based on specific claims in the token. The policy can be configured to enforce fine-grained control for specific routes or be fine-tuned to support particular methods per route. + + - Header-Based Routing + + Routing decisions can be made based on headers in the incoming requests. API owners can configure rules and conditions that must be matched before routing requests. + + See [Configure Access Control Routing]({{< relref "/nms/acm/how-to/policies/access-control-routing.md" >}}) to learn how to restrict access to your application servers based on JWT claims or header values. + +- {{% icon-feature %}} **Use the web interface to publish and manage gRPC services** + + With API Connectivity Manager 1.2, we introduced support for [publishing and managing gRPC services]({{< relref "/nms/acm/how-to/policies/grpc-policies.md" >}}). Now, in this release, we extend that capability to the web interface. 
+ + You can secure gRPC services with the following policies: + + - gRPC environment policies + + - Error Response Format + - Log Format + - Proxy Response Headers + - Request Body Size Limit + - Request Correlation ID + - TLS Backend + - TLS Inbound + + - gRPC proxy policies: + + - ACL IP Restriction + - APIKey Authentication + - Basic Authentication + - GRPC Backend Config + - JSON Web Token Assertion + - OAuth2 Introspection + - Proxy Request Headers + - Rate Limit + +- {{% icon-feature %}} **Secure communication between API Connectivity Manager and Developer Portal with mTLS** + + API Connectivity Manager communicates with the Developer Portal host to publish API docs and create API credentials. Now, PlatformOps can secure this communication channel by enabling mTLS between the hosts. + + Previously, mTLS required a TLS backend policy on the internal portal proxy cluster. API Connectivity Manager 1.3 removes that restriction. The TLS inbound policy on the internal portal allows providing a client certificate for API Connectivity Manager when mTLS is enabled. API Connectivity Manager presents this client certificate when connecting to the Developer Portal, identifying itself as a trusted client. + +- {{% icon-feature %}} **Other Enhancements** + + - **Improved policy layout** + + The Policy user interface has been improved with highlights for the different policy sections. + + - **NGINX Management Suite config changes are preserved during upgrade** + + Upgrades no longer overwrite customized configurations unless instructed to by the user. + + - **Support for chained certificates** + + Infrastructure administrators can now upload public certificates in PEM format, along with an optional list of intermediate certificates for validating the public certificate. + + - **Support for SNI requirements from hosted services** + + API owners can now use the OAuth2 policy with hosted Identity Provider services that enforce Server Name Indication (SNI). 
+ + +### Resolved Issues{#1-3-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} No validation when conflicting policies are added [(34531)]({{< relref "/nms/acm/releases/known-issues.md#34531" >}}) +- {{% icon-resolved %}} Installing NGINX Agent on Ubuntu 22.04 LTS fails with `404 Not Found` error [(35339)]({{< relref "/nms/acm/releases/known-issues.md#35339" >}}) +- {{% icon-resolved %}} New users are unable to see pages even though they have been given access. [(36607)]({{< relref "/nms/acm/releases/known-issues.md#36607" >}}) +- {{% icon-resolved %}} Portals secured with TLS policy require additional environment configuration prior to publishing API docs [(38028)]({{< relref "/nms/acm/releases/known-issues.md#38028" >}}) +- {{% icon-resolved %}} The user interface is erroneously including irrelevant information on the TLS inbound policy workflow [(38046)]({{< relref "/nms/acm/releases/known-issues.md#38046" >}}) + +### Known Issues{#1-3-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nms/acm/releases/known-issues.md" >}}) topic. + +--- + +## 1.2.0 + +October 18, 2022 + +### Upgrade Paths {#1-2-0-upgrade-paths} + +API Connectivity Manager supports upgrades from these previous versions: + +- 1.0.0 - 1.1.1 + +If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +
    + +
    + Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +
    + + +### What's New{#1-2-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Restrict access to APIs based on IP address** + + Using the [ACL-IP policy]({{< relref "/nms/acm/how-to/policies/api-access-control-lists.md" >}}), API owners can now restrict access to APIs based on IP addresses. APIs can be protected by quickly blocking rogue requests from certain IPs or allowing access to only known IPs. + +- {{% icon-feature %}} **Secure API access with OAuth2 tokens** + + API Owners can [restrict access to their APIs with OAuth2 tokens]({{< relref "/nms/acm/how-to/policies/introspection.md" >}}) by swapping an opaque token for claims or a JWT token to be proxied to the backend service. The policy can be configured to grant access to APIs after having the tokens introspected. In addition, the claims in the token can be extracted and forwarded to the backend service. + + {{}}Learn how to [set up an OAuth2 Introspection policy with Keycloak]({{< relref "/nms/acm/tutorials/introspection-keycloak.md" >}}) as the authorization server.{{}} + +- {{% icon-feature %}} **Enhanced API documentation on developer portal** + + The API documentation published to the Developer Portal now displays detailed security schema information for each API. + +- {{% icon-feature %}} **Support for HTTP/2** + + To improve the performance and efficiency of client-server interactions, HTTP/2 can be enabled on the [API proxies]({{< relref "/nms/acm/getting-started/publish-api-proxy.md#set-up-api-proxy" >}}). With HTTP/2 enabled, API Proxies will continue to maintain backward compatibility with older browsers. + +- {{% icon-feature %}} **Improved visualizations for resource credentials** + + API owners can now view the origin of resource credentials. The source field indicates where the credentials were created. For security reasons, the credentials created on the Developer Portal will be masked, but the API owners can view the origin of the resource credentials. 
+ +- {{% icon-feature %}} **Express API payload size with unit of measure** + + The maximum allowed size for the client request body can now be configured in bytes, kilobytes(K) or megabytes(M). + + The `maxRequestBodySizeLimit` attribute of the policy is deprecated and will be removed in API Connectivity Manager 1.3.0. `Size` is the new attribute that supports bytes, megabytes(M), and kilobytes(K). The default setting is 1M. + +- {{% icon-feature %}} **Database backup included in support packages** + + The [Developer Portal support package]({{< relref "/nms/support/support-package.md" >}}) now includes the option to back up the PostgreSQL database. + +- {{% icon-feature %}} **Publish and manage gRPC services - preview release** + + {{}}This is a **preview** feature for you to try out. You shouldn't use preview features for production purposes.{{}} + + To handle gRPC traffic, you can now [publish and manage gRPC proxies]({{< relref "/nms/acm/how-to/services/publish-grpc-proxy.md" >}}). + + Publish gRPC proxies and route gRPC traffic to support the following use cases: + + - Simple RPC (single request‑response) + - Response‑streaming RPC + - Request‑streaming RPC + - Bidirectional‑streaming RPC + - Route to all services in a gRPC service package + - Route to a single gRPC service + - Route to individual gRPC methods + - Route to multiple gRPC services + - Respond to errors with custom gRPC error response format policy + +- {{% icon-feature %}} **Out-of-the-box protection for Developer Portals** + + Developer Portals are now deployed with out-of-the-box protection against rapid requests/overuse and server fingerprinting: + + 1. Protection against server fingerprinting + + The proxy response header policy is now applied by default to a Developer Portal. The default policy disables server tokens from being returned in the proxy response. + + 2. 
Protection against rapid requests and over-use + + To protect the portal application, the default rate limit policy limits the number of requests a client can make in a time period. Platform admins can customize the policy to meet their SLAs. + +- {{% icon-feature %}} **Support for multi-host deployment pattern for Developer Portals** + + Developer Portals can support multiple deployment patterns. The portal backend API service can be scaled to multiple hosts and can be load-balanced using host IP addresses or internal DNS. + + To support the deployment patterns, `configs -> proxyConfig -> backends ` object has been introduced in the Portal Proxy runtime. The existing `backend` object in the `proxyCluster` object of the Portal Proxy runtime is being deprecated and will not be available in the next major release version. + + +### Resolved Issues{#1-2-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} Enums are not supported in Advanced Routing. [(34854)]({{< relref "/nms/acm/releases/known-issues.md#34854" >}}) +- {{% icon-resolved %}} Unable to delete an environment that is stuck in a Configuring state. [(35546)]({{< relref "/nms/acm/releases/known-issues.md#35546" >}}) +- {{% icon-resolved %}} Credentials endpoint is disabled by default [(35630)]({{< relref "/nms/acm/releases/known-issues.md#35630" >}}) +- {{% icon-resolved %}} Ratelimit policy cannot be applied with OAuth2 JWT Assertion policy. [(36095)]({{< relref "/nms/acm/releases/known-issues.md#36095" >}}) +- {{% icon-resolved %}} Using labels to specify the backend is partially available [(36317)]({{< relref "/nms/acm/releases/known-issues.md#36317" >}}) +- {{% icon-resolved %}} To see updates to the Listener's table, forced refresh of the cluster details page is required. 
[(36540)]({{< relref "/nms/acm/releases/known-issues.md#36540" >}}) + +### Known Issues{#1-2-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nms/acm/releases/known-issues.md" >}}) topic. + +--- + +## 1.1.1 + +August 31, 2022 + +### Upgrade Paths {#1-1-1-upgrade-paths} + +API Connectivity Manager supports upgrades from these previous versions: + +- 1.0.0 - 1.1.0 + +If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +
    + +
    + Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +
    + + +### What's New{#1-1-1-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Stability and performance improvements** + + This release includes stability and performance improvements. + + +### Resolved Issues{#1-1-1-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. + +- {{% icon-resolved %}} OIDC policy doesn't work with Auth0 Identity Providers [(36058)]({{< relref "/nms/acm/releases/known-issues.md#36058" >}}) +- {{% icon-resolved %}} Traffic is not secured between the API Proxy and backend servers [(36714)]({{< relref "/nms/acm/releases/known-issues.md#36714" >}}) +- {{% icon-resolved %}} Advanced routing ignores the Context Root setting for backend proxies [(36775)]({{< relref "/nms/acm/releases/known-issues.md#36775" >}}) + +### Known Issues{#1-1-1-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nms/acm/releases/known-issues.md" >}}) topic. + +--- + +## 1.1.0 + +August 18, 2022 + +### Upgrade Paths {#1-1-0-upgrade-paths} + +API Connectivity Manager supports upgrades from these previous versions: + +- 1.0.0 + +If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. + + + +
    + +
    + Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +
    + + +### What's New{#1-1-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **Advanced Cluster Management** + + Including more than one proxy cluster with the same hostname in an environment replicates configuration across all clusters and assists with blue-green deployments. With advanced cluster management, you can use a load balancer in front of the clusters to slowly move to the newer version of the API gateway. For example, one cluster may belong to NGINX Plus version R26 and another to R27. See the [Technical Specifications]({{< relref "/nim/fundamentals/tech-specs.md#data-plane-dev-portal" >}}). + +- {{% icon-feature %}} **Advanced Routing feature is available now** + + Advanced routing feature is available now. You can use it to publish an API Proxy and route specific URIs/endpoints precisely to a backend service. Advanced routing with OAS Specification allows you to import a specification file, parse all the URIs/endpoints in the file and publish API proxy by routing each URI/endpoint precisely to a backend service. To use the advanced routing feature without an OAS specification file, add the URI/endpoints while publishing the API proxy. See the [Advanced Configurations]({{< relref "/nms/acm/how-to/services/publish-api.md#advanced-configurations" >}}) section. + +- {{% icon-feature %}} **SQLite is supported for Developer Portal** + + SQLite is now supported as a database for [Developer Portal installations]({{< relref "/nms/acm/getting-started/add-devportal.md" >}}). + +- {{% icon-feature %}} **Support for NGINX Plus Release 27 (R27)** + + This release supports NGINX Plus Release 27 (R27) version for Data Plane instances. See the [Technical Specifications]({{< relref "tech-specs.md" >}}). + + +### Resolved Issues{#1-1-0-resolved-issues} +This release fixes the following issues. Select an issue's ID link to view its details. 
+ +- {{% icon-resolved %}} JWT Assertion policy accepts an empty string value for tokenName property [(35419)]({{< relref "/nms/acm/releases/known-issues.md#35419" >}}) +- {{% icon-resolved %}} Environment is in a premature success state even though all proxy clusters may not be onboarded [(35430)]({{< relref "/nms/acm/releases/known-issues.md#35430" >}}) +- {{% icon-resolved %}} Cannot add, remove, or edit proxy clusters from an environment that has a published API proxy [(35463)]({{< relref "/nms/acm/releases/known-issues.md#35463" >}}) +- {{% icon-resolved %}} Features in the web interface are not displayed after uploading license [(35525)]({{< relref "/nms/acm/releases/known-issues.md#35525" >}}) +- {{% icon-resolved %}} DEVPORTAL_OPTS in /etc/{default,sysconfig}/nginx-devportal does not work if value has multiple words [(36040)]({{< relref "/nms/acm/releases/known-issues.md#36040" >}}) + +### Known Issues{#1-1-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nms/acm/releases/known-issues.md" >}}) topic. + +--- + +## 1.0.0 + +July 19, 2022 + + +
    + Dependencies with Instance Manager + +{{< include "tech-specs/acm-nim-dependencies.md" >}} + +
    + + +### What's New{#1-0-0-whats-new} +This release includes the following updates: + +- {{% icon-feature %}} **API Connectivity Manager is now available** + + - Create and manage isolated workspaces for business units, development teams, etc., so each team can develop and deploy at its own pace without affecting other teams. + - Create and manage API infrastructure in isolated workspaces. + - Create and manage production and non-production environments within team workspaces and control who can access APIs at various lifecycle stages. For example, keep APIs under development private and publish production-ready APIs for public access. + - Enforce uniform security policies across all workspaces by applying global policies. + - Create Developer Portals that align with your brand, with custom color themes, logos, and favicons. + - On-board your APIs, publish to an API gateway, and publish your API documentation to the Developer Portal. + - Let teams apply policies to their API proxies to provide custom quality of service for individual applications. + - On-board API documentation by uploading an OpenAPI spec. + - Publish your API docs to a Developer Portal without giving the public access to your API. + - Monitor system and traffic metrics at the instance level. + - Self-service credential issuance for API Keys and Basic Authentication. + - Test API calls to your system using the "Try it out" feature in the Developer Portal. + + +### Known Issues{#1-0-0-known-issues} + +You can find information about known issues in the [Known Issues]({{< relref "/nms/acm/releases/known-issues.md" >}}) topic. + diff --git a/content/nms/acm/troubleshooting.md b/content/nms/acm/troubleshooting.md new file mode 100644 index 000000000..55565f1a8 --- /dev/null +++ b/content/nms/acm/troubleshooting.md @@ -0,0 +1,108 @@ +--- +description: This topic describes possible issues users might encounter when using + API Connectivity Manager. When possible, suggested workarounds are provided. 
+docs: DOCS-1222 +doctypes: +- reference +tags: +- docs +title: Troubleshooting +toc: true +weight: 1000 +--- + +## System returns `403 Forbidden` error for authorized resources + +### Description + +Users are unable to access API Connectivity Manager features that they've been granted permission for. + +The system returns errors similar to the following examples: + +- Web interface error: "ACM license not found." + +- API error: "Error accessing resource: forbidden. Please contact the system administrator. User has not been granted `READ` permission." + +### Resolution + +New roles require a minimum of `READ` access for the **Licensing** feature. Without `READ` access for **Licensing**, users will be unable to access pages for which they have been granted permission; instead, the system will return `403 Forbidden` errors as licensing errors. + +--- + +## API Connectivity Manager module doesn't show up in the web interface + +### Description + +After installing the API Connectivity Manager module, the module doesn't appear in the F5 NGINX Management Suite web interface. + +### Resolution + +- Force refresh the web page. +- Restart the API Connectivity Manager service: + + ```bash + sudo systemctl restart nms-acm + ``` + +--- + +## Routing traffic fails with `RangeError: Maximum call stack size exceeded` in the data plane error logs + +### Description + +After deploying an API Proxy using a large OpenAPI Specification or a large number of advanced routes, traffic fails to route to the backend service and instead returns a `404` error. Failed requests trigger `js exception: RangeError: Maximum call stack size exceeded` in the data plane logs. + +The number of API proxy advanced routes which can be deployed to a single API proxy is dependent on the complexity of the configuration, so it is not possible to give an exact limit; however, the table below illustrates some limits based on example configurations. 
For example, if all of your routes support a single method and have two non-enum query parameters, your configuration should be able to support up to 440 routes per API proxy. Enum parameters are not illustrated in the table below but will reduce the number of supported routes more significantly than a non-enum parameter. + +{{< bootstrap-table "table table-striped table-bordered" >}} + +| Path/Route methods | Query parameters | Supported number of Paths/Advanced Routes | +| ------------------ | ---------------- | ----------------------------------------- | +| 1 | 0 | 1100 | +| 1 | 1+ | 440 | +| 2 | 0 | 550 | +| 2 | 1+ | 220 | +| 3 | 0 | 360 | +| 3 | 1+ | 140 | +| 4 | 0 | 270 | +| 4 | 1+ | 110 | +| | | | + +{{< /bootstrap-table >}} + +{{< note >}} +The numbers in the above table are provided only as an example. Other factors may impact the total supported number of routes. +{{< /note >}} + +### Resolution + +- The limitations are for a single API proxy. Splitting your configuration and deploying it across multiple API proxies may resolve the issue. For example: + - Given an OpenAPI specification with contains 1500 routes with a single method and no parameters + - 800 paths in the specification begin with `/v1`, and 700 begin with `/v2` + - Splitting the definition into two definitions, with one containing all of the `/v1` paths and the other containing all of the `/v2` paths, should allow deployment of two API proxies which cover all of the paths defined, each one below the 1100 route limit +- Replacing enum parameters with non-enum parameters may increase the number of routes which can be deployed + +--- + +## Can't delete API Connectivity Manager objects after upgrading NGINX instances + +### Description + +After upgrading NGINX Plus instances to R27, you may not be able to delete Environments, Proxies, or Dev Portals in the API Connectivity Manager module. + +### Resolution + +Try restarting the NGINX Agent after upgrading NGINX. 
+ +- To restart the NGINX Agent, run the following command: + + ``` bash + sudo systemctl restart nginx-agent + ``` + +--- + +## How to Get Support + +{{< include "support/how-to-get-support.md" >}} + diff --git a/content/nms/acm/tutorials/_index.md b/content/nms/acm/tutorials/_index.md new file mode 100644 index 000000000..ced5d4727 --- /dev/null +++ b/content/nms/acm/tutorials/_index.md @@ -0,0 +1,6 @@ +--- +description: "The guides in this section feature end-to-end examples that will help you get the most out of F5 NGINX Management Suite API Connectivity Manager." +title: Tutorials +weight: 600 +url: /nginx-management-suite/acm/tutorials/ +--- diff --git a/content/nms/acm/tutorials/advanced-routing.md b/content/nms/acm/tutorials/advanced-routing.md new file mode 100644 index 000000000..beb03b0fb --- /dev/null +++ b/content/nms/acm/tutorials/advanced-routing.md @@ -0,0 +1,100 @@ +--- +description: Learn how to create dynamic routes for your deployments using F5 NGINX Management + Suite API Connectivity Manager. +docs: DOCS-1218 +doctypes: +- tutorial +tags: +- docs +title: Set Up Advanced Routing +toc: true +weight: 110 +--- + +## Overview + +This tutorial will show you how to create dynamic routes for your proxy deployments using the 'Advanced Routing' feature. +This allows routing to different backend services based on URI, HTTP method, etc. + +### Intended Audience + +This guide is meant for NGINX Management Suite users who can add/modify Proxy deployments and want to create dynamic route matching configurations. + +### How do I publish a Proxy with Advanced Routing? + +Follow the steps on the [Publish an HTTP API]({{< relref "/nms/acm/how-to/services/publish-api.md" >}}) section to publish a proxy. + +--- + +## Use Case + +Jane Smith has started a new job as an API developer for the Product-Search team in a hardware manufacturing company. 
+Jane needs to change the current catch-all route to more granular routes to support the new API endpoints she has added to the product. +These endpoints will take a mixture of `Query`, `Path`, and `Header` parameters. Jane would like to interact with different backend services based on the routes and parameters provided. + +### Workflow + +In the steps that follow, we will: + +- Create an API Gateway proxy to route the traffic to the backend services. +- Add Advanced Routing rules to allow granular control over the traffic based on the passed parameters. + +--- + +## Before You Begin + +To complete the instructions in this guide, you need the following: + +- [API Connectivity Manager is installed]({{< relref "/nms/acm/how-to/install-acm.md" >}}), [licensed]({{< relref "/nim/admin-guide/license/add-license.md" >}}), and running +- One or more [Service workspaces]({{< relref "/nms/acm/how-to/services/publish-api.md#create-a-service-workspace" >}}) +- One or more [Proxies]({{< relref "/nms/acm/how-to/services/publish-api.md" >}}) + +--- + +## Built-In Role + +API Connectivity Manager comes pre-configured with an [ACM API Owner]({{< relref "/nms/acm/tutorials/rbac-api-owners.md" >}}) role suitable for API Owners (The individuals or teams who are responsible for designing, creating, and maintaining APIs). + +--- + +## Example: Create An Advanced Route + +In our Proxy configuration form (found via a Proxy Create or a Proxy Edit), we will select the `Ingress` section in the left menu to see the options available to configure our proxy ingress. + +Select the **Next** button. On the next screen, we have the options related to `basepath` and `version`. At the bottom of this section, there is an expandable panel to add an `Advanced Route`; select the `Add Route` link to continue. + +This section shows several configuration options. 
For the purpose of this example, we will focus on the following: + +- `Match URI` +- `HTTP Method` +- `Parameters` + +We are going to create a route that can take two `integer` IDs in the path; for example, `/customer/123/order/1234`. We are going to do this by adding the following parameters: + +In the `Match URI` field add the following value: `/customer/{customerID}/order/{orderID}`. This configures our URI with placeholders for the path parameters `customerID` and `orderID`. + +Expand the `HTTP Method` menu, and select `GET` for this config. The `HTTP Method` parameter allows us to configure which HTTP Method this route will match for. So in this case, a `POST` to `/customer/123/order/1234` will not match and will return a `404` (or a `405` depending on your config). +You can route to different backend services for the same URI but different HTTP methods using the `TargetBackendServiceLabel` parameter, which will associate the config to a specific backend service and the `HTTP Method` parameter combination. + +In the `Parameters` section, select the `Add Parameter` button to see some new config options: + +- `Name` is the name of the parameter in the URI; this must match the placeholder value provided in `Match URI` (in the web interface, the validation will show an error if there is a mismatch). +We need to add one entry for `customerID` and another for `orderID` by selecting the `Add Parameter` button again. + +The `In` field indicates where the parameter will be passed; the options are `PATH`, `QUERY`, and `HEADER`. + +- `PATH` indicates that the parameter will be passed as a path parameter, for example, `/customer/{id}`. +- `QUERY` indicates that the parameter will be passed as a query parameter, for example, `/customer?customerID=123`. +- `HEADER` indicates that it will be passed as a header with the `Name` field as the header key. + +For this example, we will use `PATH` parameters. 
+ +`Schema Type` defines the type of parameter that will be passed, for example, `STRING`, `INTEGER`, and others which are supplied in a dropdown through the UI or in the API documentation if using the API. +For this example, we will be using `INTEGER`. + +The `Enums` option lets you limit the number of options to be allowed to match on; if anything else is passed, it doesn't match. +We won't be using `Enums` for this example. + +Now that we have added our route, we can select `Add` and `Save and Publish` on the next page. Our changes will be deployed, and we should now be able to resolve our new endpoint! + +--- diff --git a/content/nms/acm/tutorials/aws-deploy.md b/content/nms/acm/tutorials/aws-deploy.md new file mode 100644 index 000000000..3fbea5284 --- /dev/null +++ b/content/nms/acm/tutorials/aws-deploy.md @@ -0,0 +1,347 @@ +--- +description: Learn how to set up the base infrastructure required to deploy NGINX + Management Suite API Connectivity Manager in Amazon Web Services (AWS). +docs: DOCS-896 +tags: +- docs +title: Amazon Web Services Deployment Guide +toc: true +weight: 300 +--- + +{{< shortversions "1.1.0" "latest" "acmvers" >}} + +## Overview + +This guide walks you through the steps needed to set up the necessary infrastructure in Amazon Web Services (AWS) for a proof of concept environment for API Connectivity Manager. The options presented in this guide for creating AWS Instances keep cost in mind and prefer the minimum requirements for running a fully functional API Connectivity Manager environment. +Keep in mind that production environments may require larger instance sizes and incur greater costs. + +### Before You Begin + +- Make sure you have an AWS account. 
+ +{{< important >}}Because the [minimum requirement for the F5 NGINX Management Suite host]({{< relref "/nim/fundamentals/tech-specs#system-sizing" >}}) requires 2 CPU and 4GB RAM (NOT a free tier size), completing this deployment guide will incur charges from AWS according to their price plan.{{< /important >}} + +### Hosts Specs + +The AWS instance types and storage capacity used in this guide are based on the [NGINX Management Suite Technical Specs]({{< relref "/nim/fundamentals/tech-specs#system-sizing" >}}). + +{{}} + +| Hosts | AWS Instance Type | AWS Storage | +|---------------------------------|-------------------|--------------| +| NGINX Management Suite Host | t3.medium | 100GB | +| Data Plane Host | t2.micro | 10GB | +| Developer Portal Host | t2.micro | 10GB | + +{{}} +Table 1.1 Host Sizing + +## Provision AWS Instances + +Complete the tasks in this section to set up the following resources in AWS: + +1. [Virtual Private Cloud](https://docs.aws.amazon.com/vpc/) +1. [EC2 Instances](https://docs.aws.amazon.com/ec2/) + +The instances you create by the end of this guide are: + +1. NGINX Management Suite Host +1. Data Plane Host +1. Developer Portal Host + +### Configure VPC + +This section creates and configures the AWS Virtual Private Cloud (VPC) as described below. If your existing VPC is able to allow the following types of traffic, skip this section. + +1. Be able to access the internet (for install) +1. Be able to establish an SSH connection from your workstation to the EC2 Instances +1. Have HTTPS traffic enabled + - To allow NGINX Management Suite user interface and/or API access + - Communication between Data Plane or Developer Portal host and NGINX Management Suite host +1. Have HTTP traffic enabled + - To allow access to the Developer Portal from a workstation + - To allow traffic for gateway proxy from a workstation + +#### Create a New VPC + +Take the steps below to create a new VPC: + +1. Go to to the **VPC** Service. +1. 
Select **Create VPC**. +1. In the **VPC setting** section, provide the **Name** (optional) and **IPv4 CIDR**. +1. Select **Create VPC**. + +#### Create a New Subnet + +Take the steps below to create a new subnet: + +1. On the left menu, select **Virtual private cloud > Subnets**. +1. Select **Create subnet**. +1. In the **VPC** section, select the newly created VPC from above. +1. In the **Subnet settings**, provide the **Subnet name** (optional) and **IPv4 CIDR block**. +1. Select **Create subnet**. + +#### Create a New Internet Gateway + +Take the steps below to create a new internet gateway: + +1. On the left menu, select **Virtual private cloud > Internet Gateways**. +1. Select **Create internet gateway**. +1. On the main window of the newly created internet gateway, select **Actions > Attach to VPC**. +1. Select the VPC created from above. +1. Select **Attach internet gateway**. + +{{< note >}}The Internet Gateway is what provides a public subnet internet access.{{< /note >}} + +#### Create a New Route Table + +Take the steps below to create a route table, add a route entry that defaults to the internet gateway created above, and associate a subnet with this route table: + +1. On the left menu, select **Virtual private cloud > Route tables**. +1. Select **Create route table**. +1. Associate this route table to the VPC created from above. +1. Select **Create route table**. +1. Scroll down on the main window of the newly created route table then select **Edit routes**. +1. Select **Add route**. + 1. Provide `0.0.0.0/0` for the **Destination**. + 1. Select the **Internet Gateway** created from above. + 1. Select **Save changes**. +1. Scroll down on the main window on the same route table then select the **Subnet associations** tab. +1. Select **Edit subnet associations**. +1. Select the subnet created from above. +1. Select **Save changes**. + +### Create EC2 Instances + +At this point, the VPC created above is available when creating EC2 Instances. 
+ +Before creating the EC2 instances, create your **Key Pair** and **Security Groups** if they do not already exist. The reason why they are required is described below. +{{}} + +| AWS Object | Reason | +|----------------------|--------------------------------------------------------------------------------------------| +| Key Pair | This is used to allow SSH connections in to EC2 Instances. | +| Security Groups | The security group needs to enable HTTP/S traffic and allow SSH traffic from your IP. | + +{{}} +Table 1.2 Key Pair and Security Groups Reasoning + +#### Create a Key Pair + +Take the steps below to create a **Key Pair**. + +1. Go to the **EC2** Service. +1. On the left menu, select **Network & Security > Key Pairs**. +1. You can either create a new Key Pair or import your own. + - To create a new Key Pair: + 1. Select **Create key pair**. + 1. Provide the **Name**. **Key pair type**, and **Private key file format**. + - To import your existing Key Pair: + 1. Select **Actions > Import key pair**. + 1. Provide the key pair **Name** and your public key content. + +#### Create a Security Group + +The table below summarizes the two security groups that you should create. + +{{}} + +| Security Group Name | HTTP | HTTPS | SSH | +|------------------------------|---------------|----------------|-------------| +| sg-controller | NA | Anywhere-IPv4 | My IP | +| sg-data | Anywhere-IPv4 | Anywhere-IPv4 | My IP | + +{{}} +Table 1.3 AWS Inbound Security Group Source + +{{< warning >}}Selecting **Anywhere-IPv4** as the _Source_ for **HTTP** and **HTTPS** will cause the instances placed inside your Security Group to be publicly accessible. If this is not suitable for you or your organization, please ensure that appropriate restrictions are in place. {{< /warning >}} + +{{< note >}}Select **My IP** as the _Source_ for **SSH** to prevent SSH connection attempts by anyone other than yourself. 
+ +If you are not allowed to do this, refer to the [Terminal Access Using Session Manager](#session-manager) section below.{{< /note >}} + +
    + +Each host needs to be associated to a security group. The mapping of each host to the correct security group is shown below. +{{}} + +| Host | Security Group | +|------------------------------|----------------| +| NGINX Management Suite Host | sg-controller | +| Data Plane Host | sg-data | +| Developer Portal Host | sg-data | + +{{}} +Table 1.4 Host to Security Group Mapping + +
    + +Take the steps below to create a security group for access. Repeat these steps twice, once for **sg-controller** and once for **sg-data**. + +1. Go to the **EC2** Service. +1. On the left menu, select **Network & Security > Security Groups**. +1. Select **Create security group**. +1. In the **Basic details** section, provide the **Security group name**, **Description**, and select the **VPC** created from above. +1. In the **Inbound rules** section, refer to each traffic **Type** that corresponds to the security group being created from Table 1.3 above. +1. The **Outbound rules** should already allow all traffic by default. If they aren't, modify the rules so that they allow all traffic. +1. Select **Create security group**. + +#### Create EC2 Instance + +Take the steps below to create an EC2 Instance. Repeat these steps three times, once for each host shown in [Table 1.1]({{< relref "#hosts-specs" >}}). + +1. Go to the **EC2** Service. +1. On the left menu, select **Instances > Instances**. +1. Select **Launch Instances**. +1. Provide the **Name** of your instance. +1. In the **Application and OS Images** section, select your [supported OS of choice]({{< relref "/nim/fundamentals/tech-specs#distributions" >}}). +1. Select your instance size in the **Instance Type** section. Refer to [Table 1.1]({{< relref "#hosts-specs" >}}) for the suggested size of your host. Refer to [Technical Specifications]({{< relref "/nim/fundamentals/tech-specs#system-sizing" >}}) for additional information. +1. In the **Key pair (login)** section, select the key pair that was created above. +1. In the **Network settings** section, select the **Edit** button. + - Provide your **VPC** and **Subnet** information. + - Select **Enable** for **Auto-assign public IP**. + - Select **Select existing security group**. + - Provide the security group created above shown in Table 1.4 that corresponds to your host for **Common security groups**. +1. 
In the **Configure Storage** section, select the storage amount required by your host. Refer to [Table 1.1]({{< relref "#hosts-specs" >}}) for guidance to determine the suggested size. GP2 storage is suitable. Refer to [Technical Specifications]({{< relref "/nim/fundamentals/tech-specs#system-sizing" >}}) for additional information. + +#### Access EC2 Instance + +Take the steps below to obtain the public IP so you can access the instance through an SSH connection. + +1. Select **Instances > Instances** on the left menu. +1. Select your instance. +1. Select the **Details** tab. +1. The public IP address is shown in the **Public IPv4 address** section. This is the IP that allows external access (such as from your workstation) to the selected EC2 Instance. + {{< note >}}It takes about a minute for the instance to become available for SSH connections.{{< /note >}} + +## NGINX Management Suite Host Installation + +Follow the [NGINX Management Suite Installation Guide]({{< relref "/nim/deploy/_index.md" >}}) to install both the **Instance Manager Module** and the **API Connectivity Manager Module**. The **Security Module** is not required for this demo. + +## NGINX Data Plane host + +Follow the steps in the [Set Up an API Gateway Environment]({{< relref "/nms/acm/getting-started/add-api-gateway" >}}) guide to create an API Gateway and deploy it to your NGINX data plane host. + +## NGINX Developer Portal host + +Follow the steps in the [Set Up a Developer Portal Environment]({{< relref "/nms/acm/getting-started/add-devportal" >}}) guide to create a Developer Portal and deploy it to your NGINX Dev Portal host. + +## Terminal Access Using Session Manager (Optional) {#session-manager} + +AWS allows you to enable SSH traffic to a specific Source IP Address which is much safer than exposing it to everyone on the internet. Even though exposing it to one IP may be good enough, it might not be sufficient for your company policy. 
It is possible to completely disable SSH traffic yet still have terminal access to your EC2 Instances. There are different ways of doing this, and one way covered here is using [AWS Systems Manager Session Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). + +There are two methods of gaining terminal access via Session Manager: + +1. AWS Management Console +2. AWS Command Line Interface Tool + +Whichever method you decide, you need to take the following steps to properly configure your instances to allow connections from AWS Session Manager. Before continuing, ensure the [Session Manager Prerequisites](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-prerequisites.html) are met. + +### IAM Role + +You must create a new IAM Role that grants Session Manager access to EC2 Instances. This will be associated with the EC2 Instances needing terminal access. Take the instructions below to create an IAM Role for Session Manager. + +1. Log in to your AWS Account on your web browser. +1. Go to the **IAM** service. +1. On the left menu, select **Access management > Roles**. +1. Select **Create role**. +1. In the **Trusted entity type** section, select **AWS service**. +1. In the **Use case** section, select **EC2**. +1. Select **Next**. +1. In the **Permissions policies** section, select **AmazonSSMManagedInstanceCore**. You can filter for this name in the filter box. +1. Select **Next**. +1. Provide the **Role name** and **Tag** (optional) for this IAM Role specifically allowing Session Manager access to EC2 Instances. +1. Select **Create role**. + +{{< note >}}Creating an IAM Role from the AWS Management Console and choosing EC2 as the AWS Service also creates an AWS Instance Profile associated with EC2 Instances. 
Additional details can be found in [the AWS knowledge article](https://aws.amazon.com/premiumsupport/knowledge-center/attach-replace-ec2-instance-profile/).{{< /note >}} + +### Associating IAM Instance Profile to EC2 Instance + +When you associate an _IAM Role_ created from the IAM service to an EC2 Instance, you are really associating an _IAM Instance Profile_. Again, when you create an _IAM Role_ from AWS Management Console and choose EC2 as the AWS Service, it also creates an _IAM Instance Profile_. Take the steps in this section to associate an _IAM Instance Profile_ to an _EC2 Instance_. + +There are two situations that can happen here: + +1. Associating IAM Instance Profile to an existing instance +1. Associating an IAM Instance Profile to a new instance + +#### Associating IAM Instance Profile to Existing Instance + +Take the steps below to associate an IAM Instance Profile to an existing EC2 Instance: + +1. Go to the **EC2** Service. +1. On the left menu, select **Instances > Instances**. +1. Right-click on the instance of interest. +1. Select **Security > Modify IAM role**. +1. Select the **IAM Instance Profile** from the list. + +#### Associating IAM Instance Profile on New Instance + +Associating an IAM Instance Profile to a new instance happens before the instance is created. The steps below assume you know how to get to the page where you provide information for the new instance you are about to create. You see this page after selecting **Launch instances** from **Instances > Instances** on the **EC2** Service. + +1. In the **Advanced details** section, expand the entire section. +1. Select your IAM Instance Profile for **IAM instance profile**. + +### Accessing EC2 Instance Terminal + +You can access the terminal of your instance by either: + +- AWS Management Console +- AWS Command Line Interface Tool + +#### AWS Management Console + +Take the steps below to get terminal access using **Session Manager**. + +1. Go to the **System Manager** Service. 
+1. On the left menu, select **Node Management > Session Manager**. +1. Verify you are on the **Sessions** tab. +1. Select **Create session**. +1. In the **Target Instances** section, select the instance of interest. +1. Select **Start session**. This takes you to the terminal where you are logged in as `ssm-user`. +1. When you are done, select **Terminate** at the top. + +{{< note >}} If you do not see your instance in the **Target Instances** section: + +- Verify the IAM Instance Profile is associated to your instance. +- Verify the IAM Role has SSM permissions properly configured. +- The instance allows outbound HTTPS traffic to the endpoints shown in the **Connectivity to endpoints** row from the [Session Manager Prerequisites](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-prerequisites.html) page. +- Wait about 15 minutes if you attached the IAM Instance Profile to an existing instance. +{{< /note >}} + +### AWS Command Line Interface Tool + +Another way to get terminal access to instances is through AWS's CLI Tool. + +Take the steps below to fulfill prerequisites for using Session Manager on the command line interface: + +1. Install [AWS CLI Tool](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html). +1. You must also install the [Session Manager Plugin](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html). +1. You need **AWS Access Key ID** and **AWS Secret Access Key**, which you can set up by referring to the [AWS CLI Prerequisite](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-prereqs.html) page. + +Take the steps below to get terminal access on an instance: + +1. Run `aws configure` to set up access to your AWS account. 
+ + ```shell + $ aws configure + AWS Access Key ID []: ****************DLVT + AWS Secret Access Key []: ****************666r + Default region name []: + Default output format []: json + ``` + + {{< note >}} If your AWS account is configured to use temporary credentials, you need to provide the `aws_session_token` by running the command below: + + aws configure set aws_session_token {{< /note >}} + +1. Run `aws ssm start-session --target ""` to start a session which provides terminal access. + + ```shell + $ aws ssm start-session --target "" + + Starting session with SessionId: aaaaaaaa-0538f063ab275aeed + $ + ``` + +1. To exit out of the session, type `exit` as if you were going to close a normal terminal screen. diff --git a/content/nms/acm/tutorials/enable-metrics.md b/content/nms/acm/tutorials/enable-metrics.md new file mode 100644 index 000000000..33dcfdf45 --- /dev/null +++ b/content/nms/acm/tutorials/enable-metrics.md @@ -0,0 +1,291 @@ +--- +description: Learn how to enable and use metrics for F5 NGINX Management Suite API Connectivity + Manager. +docs: DOCS-1055 +tags: +- docs +title: Enable Metrics +toc: true +weight: 100 +--- + +{{< shortversions "1.3.0" "latest" "acmvers" >}} + +## Overview + +This guide walks through setting up and using metrics in API Connectivity Manager. + +{{}}The configuration presented in this guide is for demonstration purposes only. Securely configuring environments and proxies in API Connectivity Manager is not in scope for this tutorial but should be given full attention when planning for production use.{{}} + +Currently, only the following metric is available: + +- [Count of proxies in an environment](#count-proxies-in-env) + +As we add new metrics, we'll let you know in the [API Connectivity Manager release notes]({{< relref "/nms/acm/releases/release-notes.md" >}}) and update this topic accordingly. 
+
+---
+
+## Before You Begin
+
+To complete the instructions in this guide, you need the following:
+
+- Access to a virtual environment
+- Three virtual hosts with Linux installed — this guide uses [Ubuntu 20.04 LTS](https://releases.ubuntu.com/focal/).
+
    + Supported Linux distributions + + {{< include "nim/tech-specs/supported-distros.md" >}} + +
    + + {{}}It looks like you have to install both Go and Echo Server. Are there minimum requirements for these? Do we need to explain why these extra apps are needed? Customers installing in Prod might wonder, and they might even need to get approval to install them.{{}} + +--- + +## Host Setup + +This section configures the hosts used in this tutorial. In the following table, you'll find the details of the test environment used in this tutorial's examples. The options presented are the minimum host requirements for running a fully functional test environment. Remember that production environments may need more resources and incur greater costs. + +{{}} + +| Hosts | Virtual Cores | Memory | Storage | IP Address | Hostname | +|-----------------------------|---------------|--------|---------|-------------|-------------| +| F5 NGINX Management Suite Host | 2 vCPUs | 4GB | 100GB | `192.0.2.2` | `acm-ctrl` | +| Data Plane Host | 1 vCPU | 1GB | 10GB | `192.0.2.3` | `data-host` | +| Echo Server | 1 vCPU | 1GB | 10GB | `192.0.2.4` | `echo-host` | + +{{}} + +
    + +### Install NGINX Management Suite & API Connectivity Manager {#install-nsm-acm} + +Follow the steps in the [Installation Guide]({{< relref "/nim/deploy/_index.md" >}}) to set up NGINX Management Suite and API Connectivity Manager. You do not need to configure a Developer Portal for this tutorial. + +### Enable Metrics for API Connectivity Manager + +In `/etc/nms/acm.conf`, uncomment and set the `enable_metrics` property to `true`. + +``` bash +# set to true to enable metrics markers from the acm code +enable_metrics = true +``` + +Run the following command to restart the API Connectivity Manager service: + +```bash +sudo systemctl restart nms-acm +``` + +### Install NGINX Agent on Data Plane Host {#install-agent} + +Run the following commands to install the NGINX Agent on the data plane host, create a new Instance Group called `test-ig`, and add the host to it: + +``` shell +curl --insecure https://192.0.2.2/install/nginx-agent > install.sh \ +&& sudo sh install.sh -g test-ig \ +&& sudo systemctl start nginx-agent +``` + +To ensure that the advanced metrics modules are installed across all data plane hosts, please follow the steps in the [Install NGINX Plus Metrics Module]({{< relref "/nms/nginx-agent/install-nginx-plus-advanced-metrics.md" >}}) guide. + +--- + +### Install Echo Server {#install-echo-server} + +{{< note >}} The server is designed for testing HTTP proxies and clients. It echoes information about HTTP request headers and bodies back to the client. {{}} + +1. [Download and install the latest version of Go](https://go.dev/doc/install) by following the instructions on the official Go website. +2. Run the following commands to install and start [Echo Server](https://github.com/jmalloc/echo-server): + + ```shell + go env -w GO111MODULE=off + go get -u github.com/jmalloc/echo-server/... 
+ PORT=10000 LOG_HTTP_BODY=true LOG_HTTP_HEADERS=true echo-server + ``` + +--- + +## Configure API Connectivity Manager {#amc-config} + +In this section, we use the API Connectivity Manager REST API to set up a proxy in API Connectivity Manager. You need to pass the NGINX Management Suite user credentials in the Basic Authentication header for each REST request. + +### Create Workspaces & Environment {#create-workspace-environment} + +1. To create an Infrastructure Workspace with a minimum configuration, send the following JSON request to the `/infrastructure/workspaces` endpoint: + + ```bash + POST https://192.0.2.2/api/acm/v1/infrastructure/workspaces + ``` + +
    + + **JSON Request** + + ```json + { + "name": "infra-ws" + } + ``` + +1. To create an environment with a minimum configuration, send the following JSON request to the `/infrastructure/workspaces/infra-ws/environments` endpoint. The `proxyClusterName`: `test-ig` is the name of the Instance Group that the data plane host was added to when you [installed the NGINX Agent](#install-agent) above. The `hostnames` array should contain the hostname of the data plane host. + + ```bash + POST https://192.0.2.2/api/acm/v1/infrastructure/workspaces/infra-ws/environments + ``` + +
    + + **JSON Request** + + ```json + { + "name": "demo-env", + "proxies": [ + { + "proxyClusterName": "test-ig", + "hostnames": [ + "data-host" + ] + } + ] + } + ``` + +2. To create a Service Workspace with a minimum configuration, send the following JSON request to the `/services/workspaces` endpoint. + + ```bash + POST https://192.0.2.2/api/acm/v1/services/workspaces + ``` + +
    + + **JSON Request** + + ```json + { + "name": "service-ws" + } + ``` + +### Create a Basic API Proxy {#create-basic-api-proxy} + +1. To create an API proxy with a minimum configuration and the default policies, send the following JSON request to the `/services/workspaces/service-ws/proxies` endpoint. The Proxy service target is our Echo Server. + + ```bash + POST https://192.0.2.2/api/acm/v1/services/workspaces/service-ws/proxies + ``` + + **JSON Request** + + ```json + { + "name": "test-proxy", + "version": "v1", + "proxyConfig": { + "hostname": "data-host", + "ingress": { + "basePath": "/", + "basePathVersionAppendRule": "NONE" + }, + "backends": [ + { + "serviceName": "backend-echo-svc", + "serviceTargets": [ + { + "hostname": "192.0.2.4", + "listener": { + "enableTLS": false, + "port": 10000, + "transportProtocol": "HTTP" + } + } + ] + } + ] + } + } + ``` + +2. To test whether the API Proxy and backend Echo Server are working correctly, send a custom header and dummy JSON body to show these proxied values in the Echo Server response: + + ```bash + POST https://192.0.2.4/my/test/api + HEADERS: + X-NGINX-Test: true + ``` + +
    + + **JSON Request** + + ```json + { + "testKey": "testValue" + } + ``` + +
    + + **Verification** + + If everything is configured correctly in API Connectivity Manager and the Echo Server, the response should be similar to the following example: + + ```bash + Request served by echo-host + + HTTP/1.0 POST /my/test/api + + Host: 192.0.2.4 + Accept: */* + Cache-Control: no-cache + Content-Length: 30 + Content-Type: application/json + X-Correlation-Id: c241b72519e71cf7bce9262910ffbe40 + X-Real-Ip: 192.0.2.1 + X-NGINX-Test: true + + {"testKey": "testValue"} + ``` + +--- + +## Get Count of Proxies in an Environment {#count-proxies-in-env} + +To get the count of active proxies, send the following REST request to the `/infrastructure/workspaces/infra-ws/environments/demo-env/api-count` endpoint: + +```bash +GET https://192.0.2.2/api/acm/v1/infrastructure/workspaces/infra-ws/environments/demo-env/api-count +``` + +If you've successfully configured a proxy the following count is returned. + +Response: + +```json + 1 +``` + +--- + +## View Environment Metrics {#view-env-metrics} + +1. On the left menu, select **Infrastructure**. +2. Select a workspace from the table. +3. Select the Actions menu (represented by an ellipsis, `...`) next to your environment on the **Actions** column. +4. Select **Metrics**. +5. Update the start and end time of the metrics with the **time range selection** on the dashboard overview. +6. To view metrics broken down by cluster in the environment, select the **API Gateway Clusters** tab. + +--- + +## View Proxy Metrics {#view-proxy-metrics} + +1. On the left menu, select **Services**. +2. Select a workspace from the table. +3. Select the Actions menu (represented by an ellipsis, `...`) next to your environment on the **Actions** column. +4. Select **Metrics**. +5. Update the start and end time of the metrics with the **time range selection** on the dashboard overview. +6. Filter by advanced routes with the **advanced route selection** on the dashboard overview. +7. 
To view metrics broken down by status code in the proxy, select the **API Gateway Clusters** tab. diff --git a/content/nms/acm/tutorials/introspection-keycloak.md b/content/nms/acm/tutorials/introspection-keycloak.md new file mode 100644 index 000000000..74415a802 --- /dev/null +++ b/content/nms/acm/tutorials/introspection-keycloak.md @@ -0,0 +1,1552 @@ +--- +description: Learn how to set up an F5 NGINX Management Suite API Connectivity Manager + OAuth2 Introspection policy with Keycloak as the authorization server. +docs: DOCS-954 +doctypes: +- tutorial +tags: +- docs +title: OAuth2 Introspection with Keycloak +toc: true +weight: 400 +--- + +## Overview + +This tutorial walks through configuring an OAuth2 Introspection policy on an API Proxy in API Connectivity Manager with Keycloak as the authorization server. + +{{}}The configuration presented in this guide is for demonstration purposes only. The secure configuration of Environments and Proxies in API Connectivity Manager, or the secure configuration of Keycloak as the authorization server, is not in scope for this tutorial and should be given full attention when planning for production use.{{}} + +{{}}See the [OAuth2 Introspection Policy]({{< relref "/nms/acm/how-to/policies/introspection.md" >}}) reference guide for a detailed overview of the policy.{{}} + +--- + +## What is OAuth2? + +{{< include "acm/tutorials/what-is-OAuth2.md" >}} + +--- + +## Before You Begin + +To complete the instructions in this guide, you need the following: + +- Access to a virtual environment +- Four virtual hosts with Linux installed - this guide uses [Ubuntu 20.04 LTS](https://releases.ubuntu.com/focal/). + +
    + Supported Linux distributions + + {{< include "nim/tech-specs/supported-distros.md" >}} + +
    + +--- + +## Host Setup + +This section configures the hosts used in this tutorial. In the following table, you'll find the details of the test environment used in this tutorial's examples. The options presented are the minimum host requirements for running a fully functional test environment. Remember that production environments may need more resources and incur greater costs. + +{{}} + +| Hosts | Virtual Cores | Memory | Storage | IP Address | Hostname | +|------------------------------|---------------|--------|---------|---------------|-------------| +| F5 NGINX Management Suite Host | 2 vCPUs | 4GB | 100GB | `192.0.2.2` | `acm-ctrl` | +| Data Plane Host | 1 vCPU | 1GB | 10GB | `192.0.2.3` | `data-host` | +| Echo Server | 1 vCPU | 1GB | 10GB | `192.0.2.4` | `echo-host` | +| Authorization Server | 1 vCPU | 1GB | 10GB | `192.0.2.5` | `auth-host` | + +{{}} + +### Install NGINX Management Suite & API Connectivity Manager {#install-nsm-acm} + +1. Follow the steps in the [Installation Guide]({{< relref "/nim/deploy/_index.md" >}}) to set up NGINX Management Suite and API Connectivity Manager. You do not need to configure a Developer Portal for this tutorial. + +### Install NGINX Agent on Data Plane Host {#install-agent} + +1. Run the following commands to install the NGINX Agent on the data plane host, create a new Instance Group called `test-ig`, and add the host to it: + + ``` shell + curl --insecure https://192.0.2.2/install/nginx-agent > install.sh \ + && sudo sh install.sh -g test-ig \ + && sudo systemctl start nginx-agent + ``` + +### Install Echo Server {#install-echo-server} + +1. [Download and install the latest version of Go](https://go.dev/doc/install) by following the instructions on the official Go website. +2. Run the following commands to install and start [Echo Server](https://github.com/jmalloc/echo-server): + + ```shell + go env -w GO111MODULE=off + go get -u github.com/jmalloc/echo-server/... 
+ PORT=10000 LOG_HTTP_BODY=true LOG_HTTP_HEADERS=true echo-server + ``` + +### Install Authorization Server {#install-auth-server} + +This tutorial uses Keycloak in **Development mode**. Development mode is suitable for people trying out Keycloak for the first time who want to get it up and running quickly. + +Development mode sets the following default configuration: + +- HTTP is enabled +- Strict hostname resolution is disabled +- The cache is set to local (no distributed cache mechanism is used for high availability) +- Theme-caching and template-caching are disabled. + +
    + (Optional) Production mode default configuration + +For all conventional and production use cases, we recommend starting Keycloak in **Production mode**, which follows a "secure by default" principle. + +Production mode sets the following default configuration: + +- HTTP is disabled as transport layer security (HTTPS) is essential +- Hostname configuration is expected +- HTTPS/TLS configuration is expected + +
    + +
    + +1. To install the Keycloak prerequisites run the following commands: + + ```shell + sudo apt-get update + sudo apt-get -y install openjdk-11-jre + ``` + +2. Download and extract the Keycloak tarball: + + ```shell + KEYCLOAK_VERSION=19.0.3 + + curl -L -o keycloak-${KEYCLOAK_VERSION}.tar.gz \ + https://github.com/keycloak/keycloak/releases/download/${KEYCLOAK_VERSION}/keycloak-${KEYCLOAK_VERSION}.tar.gz + tar -zxf keycloak-${KEYCLOAK_VERSION}.tar.gz + rm -rf keycloak-${KEYCLOAK_VERSION}.tar.gz + ``` + +3. Create environment variables for the Keycloak admin username and password: + + {{< important >}} Do not use the example `admin/password` combination in any scenario. Replace the username and password with strong alternatives. {{< /important >}} + + ```shell + export KEYCLOAK_ADMIN= + export KEYCLOAK_ADMIN_PASSWORD= + ``` + +4. Start Keycloak in **Development Mode**: + + ```shell + cd keycloak-${KEYCLOAK_VERSION}/ + bin/kc.sh start-dev + ``` + +--- + +## Configure Keycloak {#configure-keycloak} + +In this section, we'll configure Keycloak as our OAuth2 authorization server. + +### Accessing the Keycloak UI + +Using the Keycloak admin credentials that you configured in the preceding [Install Authorization Server](#install-auth-server) section, you can access and log in to Keycloak web interface by going to: + +- `http://192.0.2.5:8080/admin` + +### Configure a Realm {#configure-realm} + +A _Realm_ manages a set of users, credentials, roles, and groups. A user belongs to and logs in to a Realm. Realms are isolated from one another and can manage and authenticate only the users they control. + +1. To create a Realm, select **Master** in the left navigation bar, then select **Add realm** from the list. +2. Enter the Realm details. For the purposes of this demonstration, our Realm will be called `nginx`. +3. Select **Create** to create the Realm. +4. The **Realm** list in the left navigation bar should now be set to `nginx`. 
+ +### Configure a User {#configure-user} + +_Users_ are entities that can log in to your system. User attributes include an email, username, address, phone number, and birthday. Users can be assigned a group membership and have specific roles assigned to them. + +1. To create a user, select **Users**, then select **Create new User**. +2. Enter the user's details. For the purposes of this demonstration, just the required field **Username** is set as `nginx-user`. +3. Select **Create** to create the user. +4. To set the user's password, select the **Credentials** tab. +5. Select **Set Password**. Enter the desired password in the **Password** and **Password Confirmation** boxes. Set **Temporary** to **OFF**. +6. Select **Save**. +7. Select **Save Password** to confirm the password change. + +### Configure a Client {#configure-client} + +Clients are entities that can ask Keycloak to authenticate a user. Most often, clients are applications and services that use Keycloak to secure themselves and provide a single sign-on solution. Clients can also be entities that request identity information or an access token to invoke other services on the network that are secured by Keycloak. + +To configure a client, take the following steps: + +1. Select **Clients**. You will see a list of pre-created Keycloak clients. To create a new one, select **Create Client**. +2. Enter the details for the client. For the purposes of this demonstration, type `nginx-plus` for the **Client ID**. Leave **Client Type** as the default value **OpenID Connect**. +3. Select **Next** to continue. +4. In the **Capability Config** section of the client configuration, set **Client Authentication** as **On**. +5. Select **Save** to create the client. +6. Select the **Credentials** tab. In the **Client Authenticator** list, choose **Client ID and Secret**. +7. Copy the **Client Secret**. You will need this secret for authenticating the `nginx-plus` client with Keycloak. 
+ +### Configure a Custom Role {#configure-custom-role} + +_Roles_ identify a user type or category. Typical roles in an organization include admin, user, manager, and employee. Applications often assign access and permissions to specific roles rather than individual users, as dealing with users can be too fine-grained and challenging to manage. + +To configure a custom role, take the following steps: + +1. Select **Realm Roles**. You will see a list of pre-created Keycloak roles. To create a new role, select **Create Role**. +2. Type the **Role Name**. For the purposes of this demonstration, use `nginx-keycloak-role` for the role name. +3. Select **Save**. +4. Once a role has been created, you need to assign the role to users. Select **Users**, then select the `nginx-user` user you created in the preceding [Configure a User](#configure-user) steps. +5. Select the **Role Mapping** tab, then select **Assign Role**. +6. Select the checkbox beside the `nginx-keycloak-role` role, then select **Assign**. + +--- + +## Test OAuth2 Token Introspection {#test-oauth2-token-introspection} + +Follow the steps in this section to test the OAuth functionality of Keycloak, token issuing, and token introspection. + +### Get the Keycloak Token Introspection Endpoints {#get-keycloak-introspection-endpoints} + +An introspection endpoint is needed to configure the Introspection policy in API Connectivity Manager. Additionally, a token endpoint is required for users to authenticate and access tokens for introspection. You can retrieve these endpoints using a REST API call to Keycloak. + +#### Structure + +```bash +curl -L -X GET http://{HOST/IP_ADDRESS}:{PORT}/realms/{REALM}/.well-known/openid-configuration +``` + +#### Example + +{{< note >}} `jq` is used in the following examples to format the JSON response from Keycloak in a legible and attractive way. For more information about `jq` , see the [jq GitHub page](https://github.com/stedolan/jq). 
{{< /note >}} + +```bash +curl -L -X GET http://192.0.2.5:8080/realms/nginx/.well-known/openid-configuration | jq +``` + +JSON Response: + +```json +{ + "issuer": "http://192.0.2.5:8080/realms/nginx", + "authorization_endpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/auth", + "token_endpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token", + "introspection_endpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token/introspect", + "userinfo_endpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/userinfo", + "end_session_endpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/logout", + "jwks_uri": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/certs", + "check_session_iframe": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/login-status-iframe.html", + "grant_types_supported": [ + "authorization_code", + "implicit", + "refresh_token", + "password", + "client_credentials", + "urn:ietf:params:oauth:grant-type:device_code", + "urn:openid:params:grant-type:ciba" + ] +} +``` + +
    + +### Generate a User Access Token {#generate-user-access-token} + +To generate an access token the below request structure is used: + +#### Structure + +```bash +curl -L -X POST 'http://{HOST/IP_ADDRESS}:{PORT}/realms/{REALM}/protocol/openid-connect/token' \ + -H 'Content-Type: application/x-www-form-urlencoded' \ + --data-urlencode 'client_id=' \ + --data-urlencode 'grant_type=password' \ + --data-urlencode 'client_secret=' \ + --data-urlencode 'scope=openid' \ + --data-urlencode 'username=' \ + --data-urlencode 'password=' +``` + +#### Example + +```bash +curl -L -X POST 'http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token' \ + -H 'Content-Type: application/x-www-form-urlencoded' \ + --data-urlencode 'client_id=nginx-plus' \ + --data-urlencode 'grant_type=password' \ + --data-urlencode 'client_secret=Fa0QIV5uNWD9lC9k5tg64m0WYxZkUwgA' \ + --data-urlencode 'scope=openid' \ + --data-urlencode 'username=nginx-user' \ + --data-urlencode 'password=password' \ + | jq +``` + +JSON Response: + +Keycloak will respond with a JSON object containing an `access_token` for the user `nginx-user`: + +```json +{ +"access_token": "", +"expires_in": 300, +"refresh_expires_in": 1800, +"refresh_token": "", +"token_type": "Bearer", +"id_token": "", +"not-before-policy": 0, +"session_state": "9836f5fd-987f-4875-ac75-f7dd5325047c", +"scope": "openid profile email" +} +``` + +Typically, the `access_token` is passed in requests to a **Resource Server** (API Proxy) as a `Bearer` token in the `Authorization` header. This is the default OAuth2 Introspection policy behavior in API Connectivity Manager. + +
    + +### Introspecting a User Access Token {#introspect-token-test} + +You can mimic the process by which an NGINX client introspects an incoming user `access_token` with Keycloak. + +{{< note >}} Keycloak is configured to accept basic auth credentials from the `nginx-plus` client; in this case, the credentials are formatted as `CLIENT_ID:CLIENT_SECRET`. This combination must be [base64 url encoded](https://www.base64url.com/) before it is passed in the `Authorization` header. {{< /note >}} + +#### Structure + +```shell +curl -L -X POST 'http://{HOST/IP_ADDRESS}:{PORT}/realms/{REALM}/protocol/openid-connect/token/introspect' \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode 'token=' \ + | jq +``` + +#### Example + +```shell +curl -L -X POST 'http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token/introspect' \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode 'token=' + | jq +``` + +JSON Response: + +Keycloak responds with a token introspection JSON response with associated claims that NGINX can extract and forward to backend services. 
+ +```json +{ + "active": true, + "exp": 1665585794, + "iat": 1665585494, + "jti": "c8723771-2474-4c94-b155-f78a4583419f", + "iss": "http://192.0.2.5:8080/realms/nginx", + "aud": "account", + "sub": "a95117bf-1a2e-4d46-9c44-5fdee8dddd11", + "typ": "Bearer", + "azp": "nginx-plus", + "session_state": "b7ca9271-02ce-453f-b491-61ec4e648d5d", + "given_name": "", + "family_name": "", + "preferred_username": "nginx-user", + "email_verified": false, + "acr": "1", + "scope": "openid profile email", + "sid": "b7ca9271-02ce-453f-b491-61ec4e648d5d", + "client_id": "nginx-plus", + "username": "nginx-user", + "realm_access": { + "roles": [ + "default-roles-nginx", + "offline_access", + "nginx-keycloak-role", + "uma_authorization" + ] + }, + "resource_access": { + "account": { + "roles": [ + "manage-account", + "manage-account-links", + "view-profile" + ] + } + } +} +``` + +
    + +At this checkpoint in the tutorial, Keycloak is sufficiently configured for token introspection. + +--- + +## Configure API Connectivity Manager {#amc-config} + +In this section, we will use the API Connectivity Manager Rest API to set up a proxy in API Connectivity Manager. You'll need to pass the NGINX Management Suite user credentials in the Basic Authentication header for each REST request. + +### Creating Workspaces & Environment {#create-workspace-environment} + +1. To create an Infrastructure Workspace with a minimum configuration, send the following JSON request to the `/infrastructure/workspaces` endpoint: + + ```bash + POST https://192.0.2.2/api/acm/v1/infrastructure/workspaces + ``` + + **JSON Request** + + ```json + { + "name": "infra-ws" + } + ``` + +1. To create an Environment with a minimum configuration, send the following JSON request to the `/infrastructure/workspaces/infra-ws/environments` endpoint. The `proxyClusterName`, `test-ig`, is the name of the Instance Group that the data plane host was added to when you [installed the NGINX Agent](#install-agent) above. The `hostnames` array should contain the hostname of the data plane host. + + ```bash + POST https://192.0.2.2/api/acm/v1/infrastructure/workspaces/infra-ws/environments + ``` + + **JSON Request** + + ```json + { + "name": "demo-env", + "proxies": [ + { + "proxyClusterName": "test-ig", + "hostnames": [ + "data-host" + ] + } + ] + } + ``` + +3. To create a Service Workspace with a minimum configuration, send the following JSON request to the `/services/workspaces` endpoint. + + ```bash + POST https://192.0.2.2/api/acm/v1/services/workspaces + ``` + + **JSON Request** + + ```json + { + "name": "service-ws" + } + ``` + +### Create a Basic API Proxy {#create-basic-api-proxy} + +1. To create an API proxy with a minimum configuration and no non-default policies, send the following JSON request to the `/services/workspaces/service-ws/proxies` endpoint. 
The Proxy service target is our echo server. + + ```bash + POST https://192.0.2.2/api/acm/v1/services/workspaces/service-ws/proxies + ``` + + **JSON Request** + + ```json + { + "name": "test-proxy", + "version": "v1", + "proxyConfig": { + "hostname": "data-host", + "ingress": { + "basePath": "/", + "basePathVersionAppendRule": "NONE" + }, + "backends": [ + { + "serviceName": "backend-echo-svc", + "serviceTargets": [ + { + "hostname": "192.0.2.4", + "listener": { + "enableTLS": false, + "port": 10000, + "transportProtocol": "HTTP" + } + } + ] + } + ] + } + } + ``` + +2. To test whether the API Proxy and backend echo server are working correctly, send a custom header and dummy JSON body to show these proxied values in the echo server response. + + ```bash + POST https://192.0.2.4/my/test/api + HEADERS: + X-NGINX-Test: true + ``` + + **JSON Request** + + ```json + { + "testKey": "testValue" + } + ``` + + **Expected Result** + + If everything is configured correctly in API Connectivity Manager and the echo server, the response should be similar to the following example: + + ```bash + Request served by echo-host + + HTTP/1.0 POST /my/test/api + + Host: 192.0.2.4 + Accept: */* + Cache-Control: no-cache + Content-Length: 30 + Content-Type: application/json + X-Correlation-Id: c241b72519e71cf7bce9262910ffbe40 + X-Real-Ip: 192.0.2.1 + X-NGINX-Test: true + + {"testKey": "testValue"} + ``` + +### Upsert OAuth2 Introspection Policy + +1. Upsert the API proxy with an OAuth2 Introspection policy. The default `action.introspectionResponse` type `application/json` is used, so you don't need to define it in the API request body. + + {{< note >}} This shortened request body removes all the default API proxy policies. To maintain these default policies, perform a `GET` request on the proxy before the upsert and copy the policy configuration from the response. 
{{< /note >}} + + ```bash + PUT https://192.0.2.2/api/acm/v1/services/workspaces/service-ws/proxies/test-proxy + ``` + + **JSON Request** + + ```json + { + "name": "test-proxy", + "version": "v1", + "proxyConfig": { + "hostname": "data-host", + "ingress": { + "basePath": "/", + "basePathVersionAppendRule": "NONE" + }, + "backends": [ + { + "serviceName": "backend-echo-svc", + "serviceTargets": [ + { + "hostname": "192.0.2.4", + "listener": { + "enableTLS": false, + "port": 10000, + "transportProtocol": "HTTP" + } + } + ] + } + ], + "policies": { + "oauth2-introspection": [ + { + "action": { + "introspectionEndpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token/introspect" + }, + "data": [ + { + "clientAppID": "nginx-plus", + "clientSecret": "lpBTyPxDORks6RHJ0nfYp6q1CvJzWSkF" + } + ] + } + ] + } + } + } + ``` + +### Testing the Introspection Policy {#test-introspection-policy} + +1. Using the same echo server request from the previous section, test the echo server again: + + ```bash + POST https://192.0.2.4/my/test/api + ``` + + If you've successfully configured and applied the OAuth2 Introspection policy, the request is blocked from reaching the backend, and `401 Unauthorized` is returned. + + JSON Response: + + ```json + { + "message": "Unauthorized", + "status": "401" + } + ``` + +2. Using the Keycloak user you created in the [Configure a User](#configure-user) section above, obtain an access token from Keycloak. Follow the steps you completed in the [Generate a User Access Token](#generate-user-access-token) section. +3. Copy the `access_token` in the JSON response that's returned from Keycloak. +4. In the next request to the echo server, add a request header with the following details: + + - key: `Authorization` + - value: `Bearer `, where `` is the token you copied in step 3. + + The `` is prefixed with `Bearer` because it's passed as a [bearer token](https://www.rfc-editor.org/rfc/rfc6750) to the API proxy. 
+
+   If the OAuth2 Introspection policy has been configured and applied successfully, this request — which now carries a valid access token — is no longer blocked and is proxied through to the backend.
+
+   ```bash
+   POST https://192.0.2.4/my/test/api
+   HEADERS:
+   Authorization: 'Bearer '
+   ```
+
+   The access token is taken from the `Authorization` header and introspected against the Keycloak introspection endpoint defined in the policy configuration. If the OAuth2 server responds with `"active": true` in the introspection response, the request proceeds to the backend. The response should look like the following example:
+
+   ```bash
+   Request served by echo-host
+
+   HTTP/1.0 POST /my/test/api
+
+   Host: default_http_a4334620-226b-491d-8503-e0724bdf5521
+   Accept: */*
+   Accept-Encoding: gzip, deflate, br
+   Cache-Control: no-cache
+   Connection: close
+   Content-Length: 30
+   Content-Type: application/json
+   X-Correlation-Id: ffc5dc656e220a20fa57835e0653f19f
+   X-Token-Exp: 1666003414
+   X-Token-Scope: openid email profile
+   X-Token-Username: nginx-user
+   ```
+
+There are a few things to note here:
+
+- The default headers changed because we removed the default request headers policy when upserting the Introspection policy. This won't happen in environments where default policies are included in the upsert request, in which case, the default request headers are forwarded to the backend services.
+- There are new headers proxied to the backend: `X-Token-Exp`, `X-Token-Scope`, and `X-Token-Username`. These are the default claims defined in the policy configuration value `action.forwardedClaimsInProxyHeader`, and values are taken from the Identity Provider (IdP) introspection response to the in-flight request.
+- There is no `Authorization` header in the request forwarded to the backend. This is because NGINX strips the incoming user access token from the header or query parameters regardless of the key used.
 + +If you pass an inactive or invalid token and perform the same request above, the request is blocked from reaching the backend, and `403 Forbidden` is returned. + +```json +{ + "message": "Forbidden", + "status": "403" +} +``` + +You can check the logs on the data host to determine the cause of the `403 Forbidden` response. There may be several reasons for a forbidden response message; however, the user only sees `403 Forbidden` in all cases, except where no access token is provided. In that case, the response is `401 Unauthorized`. + +```bash +cat /var/log/nginx/data-host-error.log +2022/10/17 10:23:11 [error] 35643#35643: *15 js: OAuth introspection access_token not provided. +2022/10/17 11:24:30 [error] 39542#39542: *49 js: OAuth token introspection found inactive token. +``` + +### Custom Token Placement & Key {#custom-token-placement-key} + +You can configure the Introspection policy to let users pass their access token as a header or query parameter using any key name. By default, the access token is given in the `Authorization` header as a bearer token. The `Bearer` prefix is required when the access token is passed in the `Authorization` header. If the header is changed from this default `Authorization` value, passing a `Bearer` prefix will render the request invalid. + +`action.clientTokenSuppliedIn` configures how the access token is passed in the user request; `action.clientTokenName` configures the key under which the access token is extracted from the user request. + +1. Upsert the proxy with an updated Introspection policy configuration, where the access token is passed in the request headers as `apiAccessToken`. The default value for `action.clientTokenSuppliedIn` is `HEADER`, so you don't need to include it in the API request body. 
+ + ```bash + PUT https://192.0.2.2/api/acm/v1/services/workspaces/service-ws/proxies/test-proxy + ``` + + **JSON Request** + + ```json + { + "name": "test-proxy", + "version": "v1", + "proxyConfig": { + "hostname": "data-host", + "ingress": { + "basePath": "/", + "basePathVersionAppendRule": "NONE" + }, + "backends": [ + { + "serviceName": "backend-echo-svc", + "serviceTargets": [ + { + "hostname": "192.0.2.4", + "listener": { + "enableTLS": false, + "port": 10000, + "transportProtocol": "HTTP" + } + } + ] + } + ], + "policies": { + "oauth2-introspection": [ + { + "action": { + "introspectionEndpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token/introspect", + "clientTokenName": "apiAccessToken" + }, + "data": [ + { + "clientAppID": "nginx-plus", + "clientSecret": "lpBTyPxDORks6RHJ0nfYp6q1CvJzWSkF" + } + ] + } + ] + } + } + } + ``` + +2. In the next request to the echo server, change the request header so the access token is passed in `apiAccessToken`. + + ```bash + POST https://192.0.2.4/my/test/api + HEADERS: + apiAccessToken: '' + ``` + + The request should proceed to the backend service as expected, and the echo server should respond in turn. + +3. Upsert the proxy with an updated Introspection policy configuration, where the access token is passed in the query arguments as `queryAuthz` with `action.clientTokenSuppliedIn` set to `QUERY`. 
 + + ```bash + PUT https://192.0.2.2/api/acm/v1/services/workspaces/service-ws/proxies/test-proxy + ``` + + ```json + { + "name": "test-proxy", + "version": "v1", + "proxyConfig": { + "hostname": "data-host", + "ingress": { + "basePath": "/", + "basePathVersionAppendRule": "NONE" + }, + "backends": [ + { + "serviceName": "backend-echo-svc", + "serviceTargets": [ + { + "hostname": "192.0.2.4", + "listener": { + "enableTLS": false, + "port": 10000, + "transportProtocol": "HTTP" + } + } + ] + } + ], + "policies": { + "oauth2-introspection": [ + { + "action": { + "introspectionEndpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token/introspect", + "clientTokenSuppliedIn": "QUERY", + "clientTokenName": "queryAuthz" + }, + "data": [ + { + "clientAppID": "nginx-plus", + "clientSecret": "lpBTyPxDORks6RHJ0nfYp6q1CvJzWSkF" + } + ] + } + ] + } + } + } + ``` + +4. In the next request to the echo server, remove the custom request header `apiAccessToken` and pass the access token in the query argument `queryAuthz`. + + ```bash + POST https://192.0.2.4/my/test/api?queryAuthz=<access_token> + ``` + + The request should proceed to the backend service as expected, and the echo server should respond in turn. Similar to passing the access token as a header, the user's access token is stripped from the in-flight request before it's forwarded to the backend service. + +### Token Caching {#token-caching} + +OAuth2 token introspection is provided by the Identity Provider (IdP) at a JSON/REST endpoint, so the standard response is a JSON object with HTTP status 200. When this response is keyed against the access token, it becomes highly cacheable. + +You can configure NGINX to cache a copy of the introspection response for each access token. Then, the next time the same access token is presented, NGINX serves the cached introspection response instead of making an API call to the IdP. Token caching vastly improves overall latency for subsequent requests. 
You can manage how long cached responses are used to mitigate the risk of accepting an expired or recently revoked access token. For example, suppose an API client typically makes a burst of several API calls over a short period. In that case, a cache validity of 10 seconds might be sufficient to provide a measurable improvement in user experience. + +#### Security Considerations {#security-considerations} + +{{}}There are some security considerations to keep in mind when enabling token caching. For example, a shorter cache expiration time is more secure since the resource servers must query the introspection endpoint more frequently; however, the increased number of queries may put a load on the endpoint. Longer expiration times, by comparison, open a window where a token may actually be expired or revoked but still be able to be used at a resource server for the remaining cache time. + +You can mitigate these situations by never caching the value beyond the token's expiration time. For example, in Keycloak, the default token duration is **300 seconds**. This should be the upper limit of token caching in the Introspection policy configuration.{{}} + +#### Token Caching Setup {#token-caching-setup} + +You can configure token caching in the Introspection policy by setting the `action.cacheIntrospectionResponse` value. An NGINX unit-of-time measurement is expected in seconds, minutes, or hours. By default, token caching is enabled for a five-minute (`5m`) cache duration. Setting the value to `0s`, `0m`, or `0h` disables caching. + +1. Upsert the proxy with an Introspection policy configuration to set a token cache duration of ten seconds (`10s`). 
+ + ```bash + PUT https://192.0.2.2/api/acm/v1/services/workspaces/service-ws/proxies/test-proxy + ``` + + ```json + { + "name": "test-proxy", + "version": "v1", + "proxyConfig": { + "hostname": "data-host", + "ingress": { + "basePath": "/", + "basePathVersionAppendRule": "NONE" + }, + "backends": [ + { + "serviceName": "backend-echo-svc", + "serviceTargets": [ + { + "hostname": "192.0.2.4", + "listener": { + "enableTLS": false, + "port": 10000, + "transportProtocol": "HTTP" + } + } + ] + } + ], + "policies": { + "oauth2-introspection": [ + { + "action": { + "introspectionEndpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token/introspect", + "cacheIntrospectionResponse": "10s" + + }, + "data": [ + { + "clientAppID": "nginx-plus", + "clientSecret": "lpBTyPxDORks6RHJ0nfYp6q1CvJzWSkF" + } + ] + } + ] + } + } + } + ``` + +2. Send a request to the echo server API proxy with the provided access token. The introspection token response will be cached in the data host. + +3. To verify whether token caching is successful, locate the token cache Realm on the data host. Realms follow the pattern `tokens__